diff --git a/Jenkinsfile b/Jenkinsfile index 3309a4fa60f..93c86bfd112 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -49,7 +49,7 @@ def el7_component_repos = "" def component_repos = "" def daos_repo = "daos@${env.BRANCH_NAME}:${env.BUILD_NUMBER}" def el7_daos_repos = el7_component_repos + ' ' + component_repos + ' ' + daos_repo -def functional_rpms = "--exclude openmpi openmpi3 hwloc ndctl spdk-tools " + +def functional_rpms = "--exclude openmpi openmpi3 hwloc ndctl " + "ior-hpc-cart-4-daos-0 mpich-autoload-cart-4-daos-0 " + "romio-tests-cart-4-daos-0 hdf5-tests-cart-4-daos-0 " + "mpi4py-tests-cart-4-daos-0 testmpio-cart-4-daos-0" @@ -262,7 +262,41 @@ pipeline { } */ } - } + } // stage('checkpatch') + stage('Python Bandit check') { + when { + beforeAgent true + expression { + ! commitPragma(pragma: 'Skip-python-bandit').contains('true') + } + } + agent { + dockerfile { + filename 'Dockerfile.code_scanning' + dir 'utils/docker' + label 'docker_runner' + additionalBuildArgs '--build-arg UID=$(id -u) --build-arg JENKINS_URL=' + + env.JENKINS_URL + } + } + steps { + githubNotify credentialsId: 'daos-jenkins-commit-status', + description: env.STAGE_NAME, + context: "build" + "/" + env.STAGE_NAME, + status: "PENDING" + checkoutScm withSubmodules: true + catchError(stageResult: 'UNSTABLE', buildResult: 'SUCCESS') { + runTest script: 'bandit -r . --format xml -o bandit.xml', + junit_files: "bandit.xml", + ignore_failure: true + } + } + post { + always { + junit 'bandit.xml' + } + } + } // stage('Python Bandit check') } } stage('Build') { @@ -1031,24 +1065,12 @@ pipeline { } parallel { stage('Coverity on CentOS 7') { - // Eventually this will only run on Master builds. - // Unfortunately for now, a PR build could break - // the quickbuild, which would not be detected until - // the master build fails. 
-// when { -// beforeAgent true -// anyOf { -// branch 'master' -// not { -// // expression returns false on grep match -// expression { -// sh script: 'git show -s --format=%B |' + -// ' grep "^Coverity-test: true"', -// returnStatus: true -// } -// } -// } -// } + when { + beforeAgent true + expression { + ! commitPragma(pragma: 'Skip-coverity-test').contains('true') + } + } agent { dockerfile { filename 'Dockerfile.centos.7' @@ -1528,7 +1550,9 @@ pipeline { allOf { not { branch 'weekly-testing' } not { environment name: 'CHANGE_TARGET', value: 'weekly-testing' } - // expression { ! skip_stage('scan-centos-rpms') } + expression { + ! commitPragma(pragma: 'Skip-scan-centos-rpms').contains('true') + } } } agent { diff --git a/SConstruct b/SConstruct index de7840a3af8..89774d3afea 100644 --- a/SConstruct +++ b/SConstruct @@ -343,7 +343,6 @@ def scons(): # pylint: disable=too-many-locals preload_prereqs(prereqs) if prereqs.check_component('valgrind_devel'): env.AppendUnique(CPPDEFINES=["DAOS_HAS_VALGRIND"]) - prereqs.has_source(env, 'fio') prereqs.add_opts(('GO_BIN', 'Full path to go binary', None)) opts.Save(opts_file, env) diff --git a/doc/admin/installation.md b/doc/admin/installation.md index f5f9cf24f3f..f1d83d22924 100644 --- a/doc/admin/installation.md +++ b/doc/admin/installation.md @@ -101,7 +101,7 @@ $ scons --config=force install ``` If you are a developer of DAOS, we recommend following the instructions in the -[DAOS for Development](https://daos-stack.github.io/admin/installation/#daos-for-development) section. +[DAOS for Development](https://daos-stack.github.io/dev/development/#building-daos-for-development) section. 
Otherwise, the missing dependencies can be built automatically by invoking scons with the following parameters: diff --git a/doc/man/man8/daos.8 b/doc/man/man8/daos.8 index 71f1bf4fdce..87834bb9a52 100644 --- a/doc/man/man8/daos.8 +++ b/doc/man/man8/daos.8 @@ -40,9 +40,13 @@ The \fBRESOURCE\fRs, respective \fBCOMMAND\fRs and \fBOPTION\fRs supported by \f \fBcontainer \fR(\fBcont\fR) container in a pool .br \fBobject \fR(\fBobj\fR) object in a container +.br + \fBversion\fR print command version .br \fBhelp\fR print this message and exit .TP +.I help \fR[\fBRESOURCE \fR[\fBCOMMAND\fR]] \h'4' per-resource/command help +.TP .I pool \fBCOMMAND\fRs: \fBlist-containers\fR list all containers in pool .br diff --git a/doc/overview/architecture.md b/doc/overview/architecture.md index 023ceeef277..e000857c310 100644 --- a/doc/overview/architecture.md +++ b/doc/overview/architecture.md @@ -49,7 +49,7 @@ Persistent Memory Development Kit (PMDK)[^1] allows managing transactional access to SCM and the Storage Performance Development Kit (SPDK)[^2] enables user-space I/O to NVMe devices. -![](./media/image1.png) +![](../admin/media/image1.png) Figure 2-1. DAOS Storage DAOS aims at delivering: diff --git a/doc/user/spark.md b/doc/user/spark.md index 0819324e09d..8676b4034bc 100644 --- a/doc/user/spark.md +++ b/doc/user/spark.md @@ -1,4 +1,5 @@ # Getting Started with the DAOS Hadoop Filesystem + Here, we describe the steps required to build and deploy the DAOS Hadoop filesystem, and the configurations to access DAOS in Spark. We assume DAOS servers and agents have already been deployed in the environment; otherwise, @@ -6,10 +7,12 @@ they can be deployed by following the [DAOS installation guide](https://daos-stack.github.io/admin/installation/). ## Build DAOS Hadoop Filesystem -The DAOS DFS Java API and Hadoop filesystem implementation have been merged into -the DAOS repository. Below are the steps to build the java jar files for the DFS -Java API and DAOS Hadoop filesystem. 
These jar files are required when running -Spark. You can ignore this section if you already have the pre-built jars. + +The DAOS Java and Hadoop filesystem implementation have been merged into +the DAOS repository. Below are the steps to build the Java jar files for the +DAOS Java and DAOS Hadoop filesystem. These jar files are required when +running Spark. You can ignore this section if you already have the pre-built +jars. ```bash $ git clone https://github.com/daos-stack/daos.git @@ -24,10 +27,11 @@ After build, the package daos-java--assemble.tgz will be available under distribution/target. ## Deploy DAOS Hadoop Filesystem -After unzipping `daos-java--assemble.tgz`, you will get the following -files. -* `daos-java-api-.jar` and `hadoop-daos-.jar` +After unzipping `daos-java--assemble.tgz`, you will get the +following files. + +* `daos-java-.jar` and `hadoop-daos-.jar` These files need to be deployed on every compute node that runs Spark. Place them in a directory, e.g., $SPARK_HOME/jars, that are accessible to all the nodes or copy them to every node. @@ -44,8 +48,8 @@ of the Spark executor and driver. This can be configured in Spark's configuration file spark-defaults.conf. ``` -spark.executor.extraClassPath /path/to/daos-java-api-.jar:/path/to/hadoop-daos-.jar -spark.driver.extraClassPath /path/to/daos-java-api-.jar:/path/to/hadoop-daos-.jar +spark.executor.extraClassPath /path/to/daos-java-.jar:/path/to/hadoop-daos-.jar +spark.driver.extraClassPath /path/to/daos-java-.jar:/path/to/hadoop-daos-.jar ``` * Next, export all DAOS related env variables and the following env variable in @@ -136,12 +140,13 @@ container UUID. Then set `fs.daos.preload.size` to a value greater than 0 and `c2.fs.daos.preload.size` to 0. ## Access DAOS in Spark + All Spark APIs that work with the Hadoop filesystem will work with DAOS. We use the `daos://` URI to access files stored in DAOS. 
For example, to read people.json file from the root directory of DAOS filesystem, we can use the following pySpark code: - ```python df = spark.read.json("daos://default:1/people.json") ``` + diff --git a/src/bio/SConscript b/src/bio/SConscript index c8292e5cf8c..8fac6552400 100644 --- a/src/bio/SConscript +++ b/src/bio/SConscript @@ -14,22 +14,17 @@ def scons(): SConscript('smd/SConscript') denv.AppendUnique(LIBPATH=['smd']) - # Link to DPDK static libs - denv.AppendUnique(LINKFLAGS=['-Wl,--whole-archive', \ - '-lrte_mempool', '-lrte_mempool_ring', '-lrte_bus_pci', \ - '-lrte_pci', '-lrte_ring', '-lrte_mbuf', '-lrte_eal', \ - '-lrte_kvargs', \ - '-Wl,--no-whole-archive']) - - # SPDK shared libs + # SPDK related libs libs = ['spdk_env_dpdk', 'spdk_thread', 'spdk_bdev', 'spdk_copy'] + libs += ['rte_mempool', 'rte_mempool_ring', 'rte_bus_pci'] + libs += ['rte_pci', 'rte_ring', 'rte_mbuf', 'rte_eal', 'rte_kvargs'] libs += ['spdk_bdev_aio', 'spdk_bdev_nvme', 'spdk_bdev_malloc'] libs += ['spdk_conf', 'spdk_blob', 'spdk_nvme', 'spdk_util'] libs += ['spdk_json', 'spdk_jsonrpc', 'spdk_rpc', 'spdk_trace'] libs += ['spdk_sock', 'spdk_log', 'spdk_notify', 'spdk_blob_bdev'] # Other libs - libs += ['numa', 'smd'] + libs += ['numa', 'dl', 'smd'] bio = daos_build.library(denv, "bio", Glob('*.c'), LIBS=libs) denv.Install('$PREFIX/lib64/daos_srv', bio) diff --git a/src/bio/bio_monitor.c b/src/bio/bio_monitor.c index 48c5a25136e..78981426a3f 100644 --- a/src/bio/bio_monitor.c +++ b/src/bio/bio_monitor.c @@ -153,11 +153,12 @@ get_spdk_err_log_page_completion(struct spdk_bdev_io *bdev_io, bool success, { struct bio_dev_health *dev_health = cb_arg; int sc, sct; + uint32_t cdw0; D_ASSERT(dev_health->bdh_inflights == 1); /* Additional NVMe status information */ - spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc); + spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc); if (sc) D_ERROR("NVMe status code/type: %d/%d\n", sc, sct); @@ -180,11 +181,12 @@ 
get_spdk_identify_ctrlr_completion(struct spdk_bdev_io *bdev_io, bool success, uint32_t numd, numdl, numdu; int rc; int sc, sct; + uint32_t cdw0; D_ASSERT(dev_health->bdh_inflights == 1); /* Additional NVMe status information */ - spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc); + spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc); if (sc) { D_ERROR("NVMe status code/type: %d/%d\n", sc, sct); dev_health->bdh_inflights--; @@ -248,11 +250,12 @@ get_spdk_log_page_completion(struct spdk_bdev_io *bdev_io, bool success, uint8_t crit_warn; int rc; int sc, sct; + uint32_t cdw0; D_ASSERT(dev_health->bdh_inflights == 1); /* Additional NVMe status information */ - spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc); + spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc); if (sc) { D_ERROR("NVMe status code/type: %d/%d\n", sc, sct); dev_health->bdh_inflights--; diff --git a/src/bio/bio_xstream.c b/src/bio/bio_xstream.c index 431a6a01d0f..34edaf932c9 100644 --- a/src/bio/bio_xstream.c +++ b/src/bio/bio_xstream.c @@ -39,9 +39,6 @@ #include "bio_internal.h" #include -/* FIXME: remove it once SPDK being upgraded */ -void spdk_set_thread(struct spdk_thread *thread); - /* These Macros should be turned into DAOS configuration in the future */ #define DAOS_MSG_RING_SZ 4096 /* SPDK blob parameters */ diff --git a/src/client/api/init.c b/src/client/api/init.c index 413a60f8be9..5700cfb0e34 100644 --- a/src/client/api/init.c +++ b/src/client/api/init.c @@ -43,7 +43,6 @@ static pthread_mutex_t module_lock = PTHREAD_MUTEX_INITIALIZER; static bool module_initialized; -bool dfs_no_cond_op; const struct daos_task_api dc_funcs[] = { /** Managment */ @@ -194,8 +193,6 @@ daos_init(void) if (rc != 0) D_GOTO(out_obj, rc); - d_getenv_bool("DFS_NO_COND_OP", &dfs_no_cond_op); - module_initialized = true; D_GOTO(unlock, rc = 0); diff --git a/src/client/dfs/dfs.c b/src/client/dfs/dfs.c index 026896e0763..676d305d7bd 100644 --- a/src/client/dfs/dfs.c +++ b/src/client/dfs/dfs.c @@ -312,16 +312,6 @@ 
fetch_entry(daos_handle_t oh, daos_handle_t th, const char *name, d_iov_set(&sg_iovs[i++], &entry->ctime, sizeof(time_t)); d_iov_set(&sg_iovs[i++], &entry->chunk_size, sizeof(daos_size_t)); - if (fetch_sym) { - D_ALLOC(value, PATH_MAX); - if (value == NULL) - return ENOMEM; - - recx.rx_nr += PATH_MAX; - /** Set Akey for Symlink Value, will be empty if no symlink */ - d_iov_set(&sg_iovs[i++], value, PATH_MAX); - } - sgl.sg_nr = i; sgl.sg_nr_out = 0; sgl.sg_iovs = sg_iovs; @@ -333,15 +323,34 @@ fetch_entry(daos_handle_t oh, daos_handle_t th, const char *name, } if (fetch_sym && S_ISLNK(entry->mode)) { - if (sgl.sg_nr_out == i) { - size_t sym_len = sg_iovs[i-1].iov_len; - - if (sym_len != 0) { - D_ASSERT(value); - D_STRNDUP(entry->value, value, PATH_MAX - 1); - if (entry->value == NULL) - D_GOTO(out, rc = ENOMEM); - } + size_t sym_len; + + D_ALLOC(value, PATH_MAX); + if (value == NULL) + return ENOMEM; + + recx.rx_idx = sizeof(mode_t) + sizeof(time_t) * 3 + + sizeof(daos_obj_id_t) + sizeof(daos_size_t); + recx.rx_nr = PATH_MAX; + + d_iov_set(&sg_iovs[0], value, PATH_MAX); + sgl.sg_nr = 1; + sgl.sg_nr_out = 0; + sgl.sg_iovs = sg_iovs; + + rc = daos_obj_fetch(oh, th, 0, &dkey, 1, &iod, &sgl, NULL, + NULL); + if (rc) { + D_ERROR("Failed to fetch entry %s (%d)\n", name, rc); + D_GOTO(out, rc = daos_der2errno(rc)); + } + + sym_len = sg_iovs[0].iov_len; + if (sym_len != 0) { + D_ASSERT(value); + D_STRNDUP(entry->value, value, PATH_MAX - 1); + if (entry->value == NULL) + D_GOTO(out, rc = ENOMEM); } } @@ -358,16 +367,12 @@ fetch_entry(daos_handle_t oh, daos_handle_t th, const char *name, static int remove_entry(dfs_t *dfs, daos_handle_t th, daos_handle_t parent_oh, - const char *name, bool cond_check, struct dfs_entry entry) + const char *name, struct dfs_entry entry) { - uint64_t cond = 0; daos_key_t dkey; daos_handle_t oh; int rc; - if (cond_check && !dfs_no_cond_op) - cond = DAOS_COND_PUNCH; - if (S_ISLNK(entry.mode)) goto punch_entry; @@ -387,7 +392,8 @@ 
remove_entry(dfs_t *dfs, daos_handle_t th, daos_handle_t parent_oh, punch_entry: d_iov_set(&dkey, (void *)name, strlen(name)); - rc = daos_obj_punch_dkeys(parent_oh, th, cond, 1, &dkey, NULL); + rc = daos_obj_punch_dkeys(parent_oh, th, DAOS_COND_PUNCH, 1, &dkey, + NULL); return daos_der2errno(rc); } @@ -404,20 +410,8 @@ insert_entry(daos_handle_t oh, daos_handle_t th, const char *name, unsigned int i; int rc; - if (cond_check && !dfs_no_cond_op) { + if (cond_check) cond = DAOS_COND_DKEY_INSERT; - } else if (cond_check) { - /** if cond_ops not enabled, fetch and check (non-atomically) */ - struct dfs_entry check_entry = {0}; - bool exists; - - /* Check if parent has the dirname entry */ - rc = fetch_entry(oh, th, name, true, &exists, &check_entry); - if (rc) - return rc; - if (exists) - return EEXIST; - } d_iov_set(&dkey, (void *)name, strlen(name)); d_iov_set(&iod.iod_name, INODE_AKEY_NAME, strlen(INODE_AKEY_NAME)); @@ -552,7 +546,7 @@ entry_stat(dfs_t *dfs, daos_handle_t th, daos_handle_t oh, const char *name, break; } case S_IFLNK: - size = strlen(entry.value) + 1; + size = strlen(entry.value); D_FREE(entry.value); break; default: @@ -923,8 +917,6 @@ open_sb(daos_handle_t coh, bool create, dfs_attr_t *attr, daos_handle_t *oh) /** create the SB and exit */ if (create) { - uint64_t cond = 0; - iods[0].iod_size = sizeof(magic); magic = DFS_SB_MAGIC; iods[1].iod_size = sizeof(sb_ver); @@ -942,11 +934,8 @@ open_sb(daos_handle_t coh, bool create, dfs_attr_t *attr, daos_handle_t *oh) else oclass = DFS_DEFAULT_OBJ_CLASS; - if (!dfs_no_cond_op) - cond = DAOS_COND_DKEY_INSERT; - - rc = daos_obj_update(*oh, DAOS_TX_NONE, cond, &dkey, SB_AKEYS, - iods, sgls, NULL); + rc = daos_obj_update(*oh, DAOS_TX_NONE, DAOS_COND_DKEY_INSERT, + &dkey, SB_AKEYS, iods, sgls, NULL); if (rc) { D_ERROR("Failed to update SB info (%d)\n", rc); D_GOTO(err, rc = daos_der2errno(rc)); @@ -1151,6 +1140,7 @@ dfs_mount(daos_handle_t poh, daos_handle_t coh, int flags, dfs_t **_dfs) entry = 
daos_prop_entry_get(prop, DAOS_PROP_CO_LAYOUT_TYPE); if (entry == NULL || entry->dpe_val != DAOS_PROP_CO_LAYOUT_POSIX) { + D_ERROR("container is not of type POSIX\n"); daos_prop_free(prop); return EINVAL; } @@ -1641,8 +1631,7 @@ remove_dir_contents(dfs_t *dfs, daos_handle_t th, struct dfs_entry entry) D_GOTO(out, rc); } - rc = remove_entry(dfs, th, oh, entry_name, true, - child_entry); + rc = remove_entry(dfs, th, oh, entry_name, child_entry); if (rc) D_GOTO(out, rc); } @@ -1717,7 +1706,7 @@ dfs_remove(dfs_t *dfs, dfs_obj_t *parent, const char *name, bool force, } } - rc = remove_entry(dfs, th, parent->oh, name, true, entry); + rc = remove_entry(dfs, th, parent->oh, name, entry); if (rc) D_GOTO(out, rc); @@ -3027,7 +3016,6 @@ dfs_chmod(dfs_t *dfs, dfs_obj_t *parent, const char *name, mode_t mode) daos_iod_t iod; daos_recx_t recx; daos_key_t dkey; - uint64_t cond = 0; int rc; if (dfs == NULL || !dfs->mounted) @@ -3110,10 +3098,8 @@ dfs_chmod(dfs_t *dfs, dfs_obj_t *parent, const char *name, mode_t mode) sgl.sg_nr_out = 0; sgl.sg_iovs = &sg_iov; - if (!dfs_no_cond_op) - cond = DAOS_COND_DKEY_UPDATE; - - rc = daos_obj_update(oh, th, cond, &dkey, 1, &iod, &sgl, NULL); + rc = daos_obj_update(oh, th, DAOS_COND_DKEY_UPDATE, &dkey, 1, &iod, + &sgl, NULL); if (rc) { D_ERROR("Failed to update mode (rc = %d)\n", rc); D_GOTO(out, rc = daos_der2errno(rc)); @@ -3139,7 +3125,6 @@ dfs_osetattr(dfs_t *dfs, dfs_obj_t *obj, struct stat *stbuf, int flags) daos_recx_t recx[3]; bool set_size = false; int i = 0; - uint64_t cond = 0; int rc; if (dfs == NULL || !dfs->mounted) @@ -3222,10 +3207,8 @@ dfs_osetattr(dfs_t *dfs, dfs_obj_t *obj, struct stat *stbuf, int flags) sgl.sg_nr_out = 0; sgl.sg_iovs = &sg_iovs[0]; - if (!dfs_no_cond_op) - cond = DAOS_COND_DKEY_UPDATE; - - rc = daos_obj_update(oh, th, cond, &dkey, 1, &iod, &sgl, NULL); + rc = daos_obj_update(oh, th, DAOS_COND_DKEY_UPDATE, &dkey, 1, &iod, + &sgl, NULL); if (rc) { D_ERROR("Failed to update attr (rc = %d)\n", rc); 
D_GOTO(out_obj, rc = daos_der2errno(rc)); @@ -3364,7 +3347,6 @@ dfs_move(dfs_t *dfs, dfs_obj_t *parent, char *name, dfs_obj_t *new_parent, daos_handle_t th = DAOS_TX_NONE; bool exists; daos_key_t dkey; - uint64_t cond = 0; int rc; if (dfs == NULL || !dfs->mounted) @@ -3454,8 +3436,7 @@ dfs_move(dfs_t *dfs, dfs_obj_t *parent, char *name, dfs_obj_t *new_parent, } } - rc = remove_entry(dfs, th, new_parent->oh, new_name, true, - new_entry); + rc = remove_entry(dfs, th, new_parent->oh, new_name, new_entry); if (rc) { D_ERROR("Failed to remove entry %s (%d)\n", new_name, rc); @@ -3468,7 +3449,7 @@ dfs_move(dfs_t *dfs, dfs_obj_t *parent, char *name, dfs_obj_t *new_parent, /** rename symlink */ if (S_ISLNK(entry.mode)) { - rc = remove_entry(dfs, th, parent->oh, name, true, entry); + rc = remove_entry(dfs, th, parent->oh, name, entry); if (rc) { D_ERROR("Failed to remove entry %s (%d)\n", name, rc); @@ -3490,12 +3471,10 @@ dfs_move(dfs_t *dfs, dfs_obj_t *parent, char *name, dfs_obj_t *new_parent, D_GOTO(out, rc); } - if (!dfs_no_cond_op) - cond = DAOS_COND_PUNCH; - /** remove the old entry from the old parent (just the dkey) */ d_iov_set(&dkey, (void *)name, strlen(name)); - rc = daos_obj_punch_dkeys(parent->oh, th, cond, 1, &dkey, NULL); + rc = daos_obj_punch_dkeys(parent->oh, th, DAOS_COND_PUNCH, 1, &dkey, + NULL); if (rc) { D_ERROR("Punch entry %s failed (%d)\n", name, rc); D_GOTO(out, rc = daos_der2errno(rc)); @@ -3586,7 +3565,7 @@ dfs_exchange(dfs_t *dfs, dfs_obj_t *parent1, char *name1, dfs_obj_t *parent2, entry1.atime = entry1.mtime = entry1.ctime = time(NULL); /** insert entry1 in parent2 object */ - rc = insert_entry(parent2->oh, th, name1, false, entry1); + rc = insert_entry(parent2->oh, th, name1, true, entry1); if (rc) { D_ERROR("Inserting entry %s failed (%d)\n", name1, rc); D_GOTO(out, rc); @@ -3594,7 +3573,7 @@ dfs_exchange(dfs_t *dfs, dfs_obj_t *parent1, char *name1, dfs_obj_t *parent2, entry2.atime = entry2.mtime = entry2.ctime = time(NULL); /** insert 
entry2 in parent1 object */ - rc = insert_entry(parent1->oh, th, name2, false, entry2); + rc = insert_entry(parent1->oh, th, name2, true, entry2); if (rc) { D_ERROR("Inserting entry %s failed (%d)\n", name2, rc); D_GOTO(out, rc); @@ -3683,33 +3662,10 @@ dfs_setxattr(dfs_t *dfs, dfs_obj_t *obj, const char *name, /** if not default flag, check for xattr existence */ if (flags != 0) { - if (!dfs_no_cond_op) { - if (flags == XATTR_CREATE) - cond |= DAOS_COND_AKEY_INSERT; - if (flags == XATTR_REPLACE) - cond |= DAOS_COND_AKEY_UPDATE; - } else { - bool exists; - - iod.iod_size = DAOS_REC_ANY; - rc = daos_obj_fetch(oh, th, 0, &dkey, 1, &iod, - NULL, NULL, NULL); - if (rc) { - D_ERROR("Failed to get extended attribute %s\n", - name); - D_GOTO(out, rc = daos_der2errno(rc)); - } - - if (iod.iod_size == 0) - exists = false; - else - exists = true; - - if (flags == XATTR_CREATE && exists) - D_GOTO(out, rc = EEXIST); - if (flags == XATTR_REPLACE && !exists) - D_GOTO(out, rc = ENOENT); - } + if (flags == XATTR_CREATE) + cond |= DAOS_COND_AKEY_INSERT; + if (flags == XATTR_REPLACE) + cond |= DAOS_COND_AKEY_UPDATE; } /** set sgl for update */ @@ -3718,9 +3674,7 @@ dfs_setxattr(dfs_t *dfs, dfs_obj_t *obj, const char *name, sgl.sg_nr_out = 0; sgl.sg_iovs = &sg_iov; - if (!dfs_no_cond_op) - cond |= DAOS_COND_DKEY_UPDATE; - + cond |= DAOS_COND_DKEY_UPDATE; iod.iod_size = size; rc = daos_obj_update(oh, th, cond, &dkey, 1, &iod, &sgl, NULL); if (rc) { @@ -3844,9 +3798,7 @@ dfs_removexattr(dfs_t *dfs, dfs_obj_t *obj, const char *name) /** set akey as the xattr name */ d_iov_set(&akey, xname, strlen(xname)); - if (!dfs_no_cond_op) - cond = DAOS_COND_DKEY_UPDATE | DAOS_COND_PUNCH; - + cond = DAOS_COND_DKEY_UPDATE | DAOS_COND_PUNCH; rc = daos_obj_punch_akeys(oh, th, cond, &dkey, 1, &akey, NULL); if (rc) { D_ERROR("Failed to punch extended attribute %s\n", name); diff --git a/src/client/java/README.md b/src/client/java/README.md index 53666a56000..38028dc9950 100644 --- 
a/src/client/java/README.md +++ b/src/client/java/README.md @@ -1,8 +1,8 @@ ## Description -This module is DAOS DFS Java API and DAOS DFS implementation of Hadoop FileSystem. There are two submodules, -daos-java-api and hadoop-daos. +This module is DAOS Java client and DAOS DFS implementation of Hadoop FileSystem. There are two submodules, +daos-client and hadoop-daos. -### daos-java-api +### daos-client It wraps most of common APIs from daos_fs.h, as well as some pool and container connection related APIs from daos_api.h. There are two main classes, DaosFsClient and DaosFile. @@ -23,7 +23,7 @@ object is cached and remain open until being released. Later DFS operations don' need to lookup repeatedly for each FS operation. ### hadoop-daos -It's DAOS FS implementation of Hadoop FileSystem based on daos-java-api. There are three main classes, DaosFileSystem, +It's DAOS FS implementation of Hadoop FileSystem based on daos-client. There are three main classes, DaosFileSystem, DaosInputStream and DaosOutputStream. * DaosFileSystem, it provides APIs to create file as DaosOutputStream, open file as DaosInputStream, list file @@ -38,11 +38,11 @@ for configuration items, defaults and their description. ## Build It's Java module and built by Maven. Java 1.8 and Maven 3 are required to build this module. After they are installed, -you can change to this /java folder and build by below command line. +you can change to this /src/client/java folder and build by below command line. mvn -DskipITs clean install -daos-java-api module depends on DAOS which is assumed being installed under /usr/local/daos. If you have different +daos-client module depends on DAOS which is assumed being installed under /usr/local/daos. If you have different location, you need to set it with '-Ddaos.install.path='. For example, mvn -DskipITs -Ddaos.install.path=/code/daos/install clean install @@ -70,7 +70,7 @@ When run with Hadoop yarn, you need to add below configuration to core-site.xml. 
```xml fs.AbstractFileSystem.daos.impl -com.intel.daos.hadoop.fs.DaosAbsFsImpl +io.daos.fs.hadoop.DaosAbsFsImpl ``` diff --git a/src/client/java/daos-java-api/src/main/native/include/com_intel_daos_client_DaosFsClient.h b/src/client/java/daos-java-api/src/main/native/include/com_intel_daos_client_DaosFsClient.h deleted file mode 100644 index 35e457af1fa..00000000000 --- a/src/client/java/daos-java-api/src/main/native/include/com_intel_daos_client_DaosFsClient.h +++ /dev/null @@ -1,253 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include -/* Header for class com_intel_daos_client_DaosFsClient */ - -#ifndef _Included_com_intel_daos_client_DaosFsClient -#define _Included_com_intel_daos_client_DaosFsClient -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: move - * Signature: (JLjava/lang/String;Ljava/lang/String;)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_move__JLjava_lang_String_2Ljava_lang_String_2 - (JNIEnv *, jobject, jlong, jstring, jstring); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: move - * Signature: (JJLjava/lang/String;JLjava/lang/String;)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_move__JJLjava_lang_String_2JLjava_lang_String_2 - (JNIEnv *, jobject, jlong, jlong, jstring, jlong, jstring); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: mkdir - * Signature: (JLjava/lang/String;IZ)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_mkdir - (JNIEnv *, jobject, jlong, jstring, jint, jboolean); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: createNewFile - * Signature: (JLjava/lang/String;Ljava/lang/String;IILjava/lang/String;IZ)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_createNewFile - (JNIEnv *, jobject, jlong, jstring, jstring, jint, jint, jstring, jint, jboolean); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: 
delete - * Signature: (JLjava/lang/String;Ljava/lang/String;Z)Z - */ -JNIEXPORT jboolean JNICALL Java_com_intel_daos_client_DaosFsClient_delete - (JNIEnv *, jobject, jlong, jstring, jstring, jboolean); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: daosOpenPool - * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_daosOpenPool - (JNIEnv *, jclass, jstring, jstring, jstring, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: daosOpenCont - * Signature: (JLjava/lang/String;I)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_daosOpenCont - (JNIEnv *, jclass, jlong, jstring, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: daosCloseContainer - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_daosCloseContainer - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: daosClosePool - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_daosClosePool - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsSetPrefix - * Signature: (JLjava/lang/String;)I - */ -JNIEXPORT jint JNICALL Java_com_intel_daos_client_DaosFsClient_dfsSetPrefix - (JNIEnv *, jobject, jlong, jstring); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsLookup - * Signature: (JJLjava/lang/String;IJ)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsLookup__JJLjava_lang_String_2IJ - (JNIEnv *, jobject, jlong, jlong, jstring, jint, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsLookup - * Signature: (JLjava/lang/String;IJ)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsLookup__JLjava_lang_String_2IJ - (JNIEnv *, jobject, jlong, jstring, jint, jlong); - -/* - * Class: 
com_intel_daos_client_DaosFsClient - * Method: dfsGetSize - * Signature: (JJ)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsGetSize - (JNIEnv *, jobject, jlong, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsDup - * Signature: (JJI)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsDup - (JNIEnv *, jobject, jlong, jlong, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsRelease - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_dfsRelease - (JNIEnv *, jobject, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsRead - * Signature: (JJJJJI)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsRead - (JNIEnv *, jobject, jlong, jlong, jlong, jlong, jlong, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsWrite - * Signature: (JJJJJI)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsWrite - (JNIEnv *, jobject, jlong, jlong, jlong, jlong, jlong, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsReadDir - * Signature: (JJI)Ljava/lang/String; - */ -JNIEXPORT jstring JNICALL Java_com_intel_daos_client_DaosFsClient_dfsReadDir - (JNIEnv *, jobject, jlong, jlong, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsOpenedObjStat - * Signature: (JJJ)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_dfsOpenedObjStat - (JNIEnv *, jobject, jlong, jlong, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsSetExtAttr - * Signature: (JJLjava/lang/String;Ljava/lang/String;I)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_dfsSetExtAttr - (JNIEnv *, jobject, jlong, jlong, jstring, jstring, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsGetExtAttr - * Signature: (JJLjava/lang/String;I)Ljava/lang/String; - */ -JNIEXPORT 
jstring JNICALL Java_com_intel_daos_client_DaosFsClient_dfsGetExtAttr - (JNIEnv *, jobject, jlong, jlong, jstring, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsRemoveExtAttr - * Signature: (JJLjava/lang/String;)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_dfsRemoveExtAttr - (JNIEnv *, jobject, jlong, jlong, jstring); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsGetChunkSize - * Signature: (J)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsGetChunkSize - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsGetMode - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_com_intel_daos_client_DaosFsClient_dfsGetMode - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsIsDirectory - * Signature: (I)Z - */ -JNIEXPORT jboolean JNICALL Java_com_intel_daos_client_DaosFsClient_dfsIsDirectory - (JNIEnv *, jclass, jint); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsMountFs - * Signature: (JJZ)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsMountFs - (JNIEnv *, jclass, jlong, jlong, jboolean); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsMountFsOnRoot - * Signature: (J)J - */ -JNIEXPORT jlong JNICALL Java_com_intel_daos_client_DaosFsClient_dfsMountFsOnRoot - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsUnmountFsOnRoot - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_dfsUnmountFsOnRoot - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: dfsUnmountFs - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_com_intel_daos_client_DaosFsClient_dfsUnmountFs - (JNIEnv *, jclass, jlong); - -/* - * Class: com_intel_daos_client_DaosFsClient - * Method: daosFinalize - * Signature: ()V - */ -JNIEXPORT void 
JNICALL Java_com_intel_daos_client_DaosFsClient_daosFinalize - (JNIEnv *, jclass); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/src/client/java/daos-java-api/src/main/resources/log4j2.xml b/src/client/java/daos-java-api/src/main/resources/log4j2.xml deleted file mode 100644 index 429483f2269..00000000000 --- a/src/client/java/daos-java-api/src/main/resources/log4j2.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/client/java/daos-java-api/find_java_home.sh b/src/client/java/daos-java/find_java_home.sh similarity index 100% rename from src/client/java/daos-java-api/find_java_home.sh rename to src/client/java/daos-java/find_java_home.sh diff --git a/src/client/java/daos-java-api/pom.xml b/src/client/java/daos-java/pom.xml similarity index 91% rename from src/client/java/daos-java-api/pom.xml rename to src/client/java/daos-java/pom.xml index 34f4e554932..1ea6fc5f634 100644 --- a/src/client/java/daos-java-api/pom.xml +++ b/src/client/java/daos-java/pom.xml @@ -5,13 +5,13 @@ 4.0.0 - com.intel.daos + io.daos daos-java-root - 0.0.1-SNAPSHOT + 1.1.0-SNAPSHOT - com.intel.daos - daos-java-api + io.daos + daos-java jar @@ -56,7 +56,7 @@ true - com.intel.daos.client.DaosFsClient + io.daos.dfs.DaosFsClient ${project.basedir}/src/main/native/include @@ -84,7 +84,7 @@ - + @@ -94,8 +94,8 @@ - - + + @@ -105,7 +105,7 @@ executable="gcc"> - + diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/BufferAllocator.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/BufferAllocator.java similarity index 97% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/BufferAllocator.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/BufferAllocator.java index 17c438f8265..0f27acd3d16 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/BufferAllocator.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/BufferAllocator.java 
@@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; import java.nio.ByteBuffer; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/Cleaner.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/Cleaner.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/Cleaner.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/Cleaner.java index 524dfad32c5..c36a8f9ab12 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/Cleaner.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/Cleaner.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; import java.lang.ref.PhantomReference; import java.lang.ref.ReferenceQueue; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/Constants.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/Constants.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/Constants.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/Constants.java index 9d2871b087b..de3a7a2bfc6 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/Constants.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/Constants.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; /** * value constants. 
diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosFile.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosFile.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosFile.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/DaosFile.java index 4a247965724..0eb763685a0 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosFile.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosFile.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; import java.io.IOException; import java.nio.ByteBuffer; @@ -152,7 +152,7 @@ public void createNewFile(boolean createParent) throws IOException { * @param mode * should be octal number, like 0775 * @param objectType - * object type, see {@link com.intel.daos.client.DaosFsClient.DaosFsClientBuilder#defaultFileType} + * object type, see {@link io.daos.dfs.DaosFsClient.DaosFsClientBuilder#defaultFileType} * @param chunkSize * file chunk size * @param createParent diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosFsClient.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosFsClient.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosFsClient.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/DaosFsClient.java index 0be836376a7..801d94543fd 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosFsClient.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosFsClient.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. 
*/ -package com.intel.daos.client; +package io.daos.dfs; import java.io.File; import java.io.IOException; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosIOException.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosIOException.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosIOException.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/DaosIOException.java index a6e34041b27..dcfa6cd43c5 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosIOException.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosIOException.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; import java.io.IOException; import java.lang.reflect.Field; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosObjectType.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosObjectType.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosObjectType.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/DaosObjectType.java index 07beafd377a..f0a6df82e76 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosObjectType.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosObjectType.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; /** * Type of DAOS object. 
diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosUtils.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosUtils.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosUtils.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/DaosUtils.java index 5c8a6b8c517..1ececda5303 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/DaosUtils.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/DaosUtils.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; import java.util.UUID; import java.util.regex.Matcher; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/ErrorCode.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/ErrorCode.java similarity index 97% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/ErrorCode.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/ErrorCode.java index 21504e7b6c8..14cf51bf364 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/ErrorCode.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/ErrorCode.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; /** * error code to message. 
diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/ShutdownHookManager.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/ShutdownHookManager.java similarity index 98% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/ShutdownHookManager.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/ShutdownHookManager.java index c9e4ea951a2..5be49a65cd4 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/ShutdownHookManager.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/ShutdownHookManager.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.client; +package io.daos.dfs; import java.util.Deque; import java.util.concurrent.ConcurrentLinkedDeque; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/StatAttributes.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/StatAttributes.java similarity index 99% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/StatAttributes.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/StatAttributes.java index e1b865ceaf4..0cd6dc85a89 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/StatAttributes.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/StatAttributes.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. 
*/ -package com.intel.daos.client; +package io.daos.dfs; import java.nio.ByteBuffer; import java.nio.ByteOrder; diff --git a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/package-info.java b/src/client/java/daos-java/src/main/java/io/daos/dfs/package-info.java similarity index 63% rename from src/client/java/daos-java-api/src/main/java/com/intel/daos/client/package-info.java rename to src/client/java/daos-java/src/main/java/io/daos/dfs/package-info.java index e6190a44fe3..0e6690a6e82 100644 --- a/src/client/java/daos-java-api/src/main/java/com/intel/daos/client/package-info.java +++ b/src/client/java/daos-java/src/main/java/io/daos/dfs/package-info.java @@ -27,18 +27,18 @@ * *

* Typical usage: - * 1, Instantiate {@link com.intel.daos.client.DaosFsClient.DaosFsClientBuilder} as builder + * 1, Instantiate {@link io.daos.dfs.DaosFsClient.DaosFsClientBuilder} as builder * 2, Set poolId, containerId and other parameters on builder - * 3, Call {@link com.intel.daos.client.DaosFsClient.DaosFsClientBuilder#build()} to get - * {@link com.intel.daos.client.DaosFsClient} instance - * 4, Call {@linkplain com.intel.daos.client.DaosFsClient#getFile getFile} methods to instantiate - * {@link com.intel.daos.client.DaosFile} - * 5, Operate on {@link com.intel.daos.client.DaosFile} instance. + * 3, Call {@link io.daos.dfs.DaosFsClient.DaosFsClientBuilder#build()} to get + * {@link io.daos.dfs.DaosFsClient} instance + * 4, Call {@linkplain io.daos.dfs.DaosFsClient#getFile getFile} methods to instantiate + * {@link io.daos.dfs.DaosFile} + * 5, Operate on {@link io.daos.dfs.DaosFile} instance. * *

- * After the step 3, you can call below convenient methods directly on {@link com.intel.daos.client.DaosFsClient} - *

  • {@link com.intel.daos.client.DaosFsClient#mkdir(String, int, boolean)}
  • - *
  • {@link com.intel.daos.client.DaosFsClient#move(String, String)}
  • - *
  • {@link com.intel.daos.client.DaosFsClient#delete(String)}
  • + * After the step 3, you can call below convenient methods directly on {@link io.daos.dfs.DaosFsClient} + *
  • {@link io.daos.dfs.DaosFsClient#mkdir(String, int, boolean)}
  • + *
  • {@link io.daos.dfs.DaosFsClient#move(String, String)}
  • + *
  • {@link io.daos.dfs.DaosFsClient#delete(String)}
  • */ -package com.intel.daos.client; \ No newline at end of file +package io.daos.dfs; \ No newline at end of file diff --git a/src/client/java/daos-java-api/src/main/native/include/daos_jni_common.h b/src/client/java/daos-java/src/main/native/include/daos_jni_common.h similarity index 100% rename from src/client/java/daos-java-api/src/main/native/include/daos_jni_common.h rename to src/client/java/daos-java/src/main/native/include/daos_jni_common.h diff --git a/src/client/java/daos-java-api/src/main/native/com_intel_daos_client_DaosFsClient.c b/src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c similarity index 94% rename from src/client/java/daos-java-api/src/main/native/com_intel_daos_client_DaosFsClient.c rename to src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c index 6b62d4832b2..2a2389190bb 100644 --- a/src/client/java/daos-java-api/src/main/native/com_intel_daos_client_DaosFsClient.c +++ b/src/client/java/daos-java/src/main/native/io_daos_dfs_DaosFsClient.c @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. 
*/ -#include "com_intel_daos_client_DaosFsClient.h" +#include "io_daos_dfs_DaosFsClient.h" #include #include #include @@ -67,7 +67,7 @@ JNI_OnLoad(JavaVM *vm, void *reserved) } jclass local_class = (*env)->FindClass(env, - "com/intel/daos/client/DaosIOException"); + "io/daos/dfs/DaosIOException"); daos_io_exception_class = (*env)->NewGlobalRef(env, local_class); jmethodID m1 = (*env)->GetMethodID(env, daos_io_exception_class, @@ -197,7 +197,7 @@ throw_exception_const_msg(JNIEnv *env, char *msg, int error_code) * \return copied pool handle in long */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_daosOpenPool(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_daosOpenPool(JNIEnv *env, jclass clientClass, jstring poolId, jstring serverGroup, jstring ranks, jint flags) { @@ -258,7 +258,7 @@ Java_com_intel_daos_client_DaosFsClient_daosOpenPool(JNIEnv *env, * \param[in] poolHandle pool handle */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_daosClosePool(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_daosClosePool(JNIEnv *env, jclass clientClass, jlong poolHandle) { daos_handle_t poh; @@ -284,7 +284,7 @@ Java_com_intel_daos_client_DaosFsClient_daosClosePool(JNIEnv *env, * \return copied container handle in long */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_daosOpenCont(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_daosOpenCont(JNIEnv *env, jclass clientClass, jlong poolHandle, jstring contUuid, jint mode) { @@ -321,7 +321,7 @@ Java_com_intel_daos_client_DaosFsClient_daosOpenCont(JNIEnv *env, * \param[in] contHandle container handle */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_daosCloseContainer(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_daosCloseContainer(JNIEnv *env, jclass clientClass, jlong contHandle) { daos_handle_t coh; @@ -347,7 +347,7 @@ Java_com_intel_daos_client_DaosFsClient_daosCloseContainer(JNIEnv *env, * \return address of dfs object */ JNIEXPORT jlong JNICALL 
-Java_com_intel_daos_client_DaosFsClient_dfsMountFs(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsMountFs(JNIEnv *env, jclass clientClass, jlong poolHandle, jlong contHandle, jboolean readOnly) { @@ -377,7 +377,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsMountFs(JNIEnv *env, * \param[in] dfsPtr address of dfs object */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsUnmountFs(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsUnmountFs(JNIEnv *env, jclass clientClass, jlong dfsPtr) { dfs_t *dfs = *(dfs_t **)&dfsPtr; @@ -400,7 +400,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsUnmountFs(JNIEnv *env, * \return address of dfs object */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsMountFsOnRoot(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsMountFsOnRoot(JNIEnv *env, jclass clientClass, jlong poolHandle) { dfs_t *dfsPtr; @@ -426,7 +426,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsMountFsOnRoot(JNIEnv *env, * \param[in] dfsPtr address of dfs object */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsUnmountFsOnRoot(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsUnmountFsOnRoot(JNIEnv *env, jclass clientClass, jlong dfsPtr) { dfs_t *dfs = *(dfs_t **)&dfsPtr; @@ -446,7 +446,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsUnmountFsOnRoot(JNIEnv *env, * \param[in] clientClass class of DaosFsClient */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_daosFinalize(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_daosFinalize(JNIEnv *env, jclass clientClass) { int rc = daos_fini(); @@ -467,7 +467,7 @@ Java_com_intel_daos_client_DaosFsClient_daosFinalize(JNIEnv *env, * \param[in] destPath destination path */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_move__JLjava_lang_String_2Ljava_lang_String_2( +Java_io_daos_dfs_DaosFsClient_move__JLjava_lang_String_2Ljava_lang_String_2( JNIEnv *env, jobject obj, jlong dfsPtr, jstring srcPath, jstring destPath) { @@ -554,7 +554,7 @@ 
Java_com_intel_daos_client_DaosFsClient_move__JLjava_lang_String_2Ljava_lang_Str * path */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_move__JJLjava_lang_String_2JLjava_lang_String_2( +Java_io_daos_dfs_DaosFsClient_move__JJLjava_lang_String_2JLjava_lang_String_2( JNIEnv *env, jobject obj, jlong dfsPtr, jlong srcPrtObjId, jstring srcName, jlong destPrtObjId, jstring destName) { @@ -668,7 +668,7 @@ static int mkdirs(dfs_t *dfs, char *path, int mode, unsigned char recursive, * \param[in] recursive create directory recursively */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_mkdir(JNIEnv *env, jobject client, +Java_io_daos_dfs_DaosFsClient_mkdir(JNIEnv *env, jobject client, jlong dfsPtr, jstring path, jint mode, jboolean recursive) { const char* path_str = (*env)->GetStringUTFChars(env, path, NULL); @@ -751,7 +751,7 @@ Java_com_intel_daos_client_DaosFsClient_mkdir(JNIEnv *env, jobject client, * \return memory address of opened dfs object of new file */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_createNewFile(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_createNewFile(JNIEnv *env, jobject client, jlong dfsPtr, jstring parentPath, jstring name, jint mode, jint accessFlags, jstring objectType, jint chunkSize, jboolean createParent) @@ -836,7 +836,7 @@ Java_com_intel_daos_client_DaosFsClient_createNewFile(JNIEnv *env, * \return 0 if failed to delete, 1 if deleted successfully */ JNIEXPORT jboolean JNICALL -Java_com_intel_daos_client_DaosFsClient_delete(JNIEnv *env, jobject client, +Java_io_daos_dfs_DaosFsClient_delete(JNIEnv *env, jobject client, jlong dfsPtr, jstring parentPath, jstring name, jboolean force) { @@ -880,7 +880,7 @@ Java_com_intel_daos_client_DaosFsClient_delete(JNIEnv *env, jobject client, * \return memory address of opened fs object. 
*/ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsLookup__JJLjava_lang_String_2IJ( +Java_io_daos_dfs_DaosFsClient_dfsLookup__JJLjava_lang_String_2IJ( JNIEnv *env, jobject client, jlong dfsPtr, jlong parentObjId, jstring name, jint flags, jlong bufferAddress) { @@ -920,7 +920,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsLookup__JJLjava_lang_String_2IJ( * \return memory address of opened fs object. */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsLookup__JLjava_lang_String_2IJ( +Java_io_daos_dfs_DaosFsClient_dfsLookup__JLjava_lang_String_2IJ( JNIEnv *env, jobject client, jlong dfsPtr, jstring path, jint flags, jlong bufferAddress) { @@ -953,7 +953,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsLookup__JLjava_lang_String_2IJ( * \return size of file */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsGetSize(JNIEnv *env, jobject client, +Java_io_daos_dfs_DaosFsClient_dfsGetSize(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId) { dfs_t *dfs = *(dfs_t **)&dfsPtr; @@ -982,7 +982,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsGetSize(JNIEnv *env, jobject client, * \return memory address of new file object */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsDup(JNIEnv *env, jobject client, +Java_io_daos_dfs_DaosFsClient_dfsDup(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jint flags) { dfs_t *dfs = *(dfs_t **)&dfsPtr; @@ -1007,7 +1007,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsDup(JNIEnv *env, jobject client, * \parem[in] objId pointer to fs object */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsRelease(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsRelease(JNIEnv *env, jclass clientClass, jlong objId) { dfs_obj_t *file = *(dfs_obj_t **)&objId; @@ -1036,7 +1036,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsRelease(JNIEnv *env, * \return actual read length */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsRead(JNIEnv *env, 
jobject client, +Java_io_daos_dfs_DaosFsClient_dfsRead(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jlong bufferAddress, jlong fileOffset, jlong len, jint eventNo) { @@ -1082,7 +1082,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsRead(JNIEnv *env, jobject client, * \return actual write length */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsWrite(JNIEnv *env, jobject client, +Java_io_daos_dfs_DaosFsClient_dfsWrite(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jlong bufferAddress, jlong fileOffset, jlong len, jint eventNo) { @@ -1122,7 +1122,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsWrite(JNIEnv *env, jobject client, * \return file name separated by ',' */ JNIEXPORT jstring JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsReadDir(JNIEnv *env, jobject client, +Java_io_daos_dfs_DaosFsClient_dfsReadDir(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jint maxEntries) { dfs_t *dfs = *(dfs_t **)&dfsPtr; @@ -1248,7 +1248,7 @@ static void set_user_group_name(JNIEnv *env, char *buffer, struct stat *stat) * \param[in] bufferAddress pointer to opened fs object */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsOpenedObjStat(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsOpenedObjStat(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jlong bufferAddress) { @@ -1304,7 +1304,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsOpenedObjStat(JNIEnv *env, * \param[in] flags attribute flags */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsSetExtAttr(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsSetExtAttr(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jstring name, jstring value, jint flags) { @@ -1342,7 +1342,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsSetExtAttr(JNIEnv *env, * \return attribute value */ JNIEXPORT jstring JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsGetExtAttr(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsGetExtAttr(JNIEnv *env, jobject 
client, jlong dfsPtr, jlong objId, jstring name, jint expectedValueLen) { @@ -1392,7 +1392,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsGetExtAttr(JNIEnv *env, * \param[in] name attribute name */ JNIEXPORT void JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsRemoveExtAttr(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsRemoveExtAttr(JNIEnv *env, jobject client, jlong dfsPtr, jlong objId, jstring name) { dfs_t *dfs = *(dfs_t **)&dfsPtr; @@ -1420,7 +1420,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsRemoveExtAttr(JNIEnv *env, * \return chunk size */ JNIEXPORT jlong JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsGetChunkSize(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsGetChunkSize(JNIEnv *env, jclass clientClass, jlong objId) { dfs_obj_t *file = *(dfs_obj_t **)&objId; @@ -1446,7 +1446,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsGetChunkSize(JNIEnv *env, * \return file mode */ JNIEXPORT jint JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsGetMode(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsGetMode(JNIEnv *env, jclass clientClass, jlong objId) { dfs_obj_t *file = *(dfs_obj_t **)&objId; @@ -1471,7 +1471,7 @@ Java_com_intel_daos_client_DaosFsClient_dfsGetMode(JNIEnv *env, * \return 0 for non-directory, 1 for directory */ JNIEXPORT jboolean JNICALL -Java_com_intel_daos_client_DaosFsClient_dfsIsDirectory(JNIEnv *env, +Java_io_daos_dfs_DaosFsClient_dfsIsDirectory(JNIEnv *env, jclass clientClass, jint mode) { return S_ISDIR(mode) ? 
1 : 0; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFileIT.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFileIT.java similarity index 99% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFileIT.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFileIT.java index 4c4b96b81cd..3ac47acb99a 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFileIT.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFileIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import com.sun.security.auth.module.UnixSystem; import org.junit.AfterClass; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFileMultiThreadsIT.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFileMultiThreadsIT.java similarity index 99% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFileMultiThreadsIT.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFileMultiThreadsIT.java index 8b4468f0568..ab2f5074eb7 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFileMultiThreadsIT.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFileMultiThreadsIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import org.junit.AfterClass; import org.junit.Assert; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFilePathTest.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFilePathTest.java similarity index 98% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFilePathTest.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFilePathTest.java index 4635a198743..2f0cc54051e 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFilePathTest.java +++ 
b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFilePathTest.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import org.junit.Assert; import org.junit.Test; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFsClientIT.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFsClientIT.java similarity index 99% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFsClientIT.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFsClientIT.java index fb948cb2f88..29d22c68e89 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFsClientIT.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFsClientIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import org.junit.Assert; import org.junit.BeforeClass; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFsClientTestBase.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFsClientTestBase.java similarity index 87% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFsClientTestBase.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFsClientTestBase.java index db4dd91fd5e..5b42749e764 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosFsClientTestBase.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosFsClientTestBase.java @@ -1,10 +1,10 @@ -package com.intel.daos.client; +package io.daos.dfs; public class DaosFsClientTestBase { - public static final String DEFAULT_POOL_ID = "6614df7a-a13f-4f7b-aa1e-0ee3c6804456"; + public static final String DEFAULT_POOL_ID = "0417107c-144e-4394-a7f1-a281d0251b0c"; // public static final String DEFAULT_CONT_ID = "ffffffff-ffff-ffff-ffff-ffffffffffff"; - public static final String DEFAULT_CONT_ID = "fe258219-5c5f-4f48-bcd4-143996a770ce"; + public static final 
String DEFAULT_CONT_ID = "71bfbb65-5de6-4f85-88a5-e1a8b33af335"; public static DaosFsClient prepareFs(String poolId, String contId) throws Exception { DaosFsClient.DaosFsClientBuilder builder = new DaosFsClient.DaosFsClientBuilder(); diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosIOExceptionTest.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosIOExceptionTest.java similarity index 98% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosIOExceptionTest.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosIOExceptionTest.java index 3edb93bd26b..baca3f7e29d 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosIOExceptionTest.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosIOExceptionTest.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import org.junit.Assert; import org.junit.Test; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosUtilsTest.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosUtilsTest.java similarity index 99% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosUtilsTest.java rename to src/client/java/daos-java/src/test/java/io/daos/dfs/DaosUtilsTest.java index fc64bae1c4b..40953775251 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/DaosUtilsTest.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/DaosUtilsTest.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import org.junit.Assert; import org.junit.Test; diff --git a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/zCleanerIT.java b/src/client/java/daos-java/src/test/java/io/daos/dfs/zCleanerIT.java similarity index 84% rename from src/client/java/daos-java-api/src/test/java/com/intel/daos/client/zCleanerIT.java rename to 
src/client/java/daos-java/src/test/java/io/daos/dfs/zCleanerIT.java index d84905e6e01..33a85443437 100644 --- a/src/client/java/daos-java-api/src/test/java/com/intel/daos/client/zCleanerIT.java +++ b/src/client/java/daos-java/src/test/java/io/daos/dfs/zCleanerIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.client; +package io.daos.dfs; import org.junit.Test; diff --git a/src/client/java/distribution/pom.xml b/src/client/java/distribution/pom.xml index 5d298de9323..5e8dc848bfb 100644 --- a/src/client/java/distribution/pom.xml +++ b/src/client/java/distribution/pom.xml @@ -5,12 +5,12 @@ 4.0.0 - com.intel.daos + io.daos daos-java-root - 0.0.1-SNAPSHOT + 1.1.0-SNAPSHOT - com.intel.daos + io.daos distribution pom @@ -70,4 +70,4 @@ - \ No newline at end of file + diff --git a/src/client/java/distribution/src/assembly/make-assembly-with-dependencies.xml b/src/client/java/distribution/src/assembly/make-assembly-with-dependencies.xml index 40cfe51a0b8..03ed1de0237 100644 --- a/src/client/java/distribution/src/assembly/make-assembly-with-dependencies.xml +++ b/src/client/java/distribution/src/assembly/make-assembly-with-dependencies.xml @@ -27,4 +27,4 @@ - \ No newline at end of file + diff --git a/src/client/java/distribution/src/assembly/make-assembly.xml b/src/client/java/distribution/src/assembly/make-assembly.xml index e2c5c1940eb..d58bd7518f9 100644 --- a/src/client/java/distribution/src/assembly/make-assembly.xml +++ b/src/client/java/distribution/src/assembly/make-assembly.xml @@ -6,10 +6,10 @@ false - ${project.parent.basedir}/daos-java-api/target + ${project.parent.basedir}/daos-java/target . 
- daos-java-api-${project.version}.jar + daos-java-${project.version}.jar @@ -34,4 +34,4 @@ - \ No newline at end of file + diff --git a/src/client/java/hadoop-daos/pom.xml b/src/client/java/hadoop-daos/pom.xml index 0745fb997c4..c2f91783416 100644 --- a/src/client/java/hadoop-daos/pom.xml +++ b/src/client/java/hadoop-daos/pom.xml @@ -5,12 +5,12 @@ 4.0.0 - com.intel.daos + io.daos daos-java-root - 0.0.1-SNAPSHOT + 1.1.0-SNAPSHOT - com.intel.daos + io.daos hadoop-daos jar @@ -34,8 +34,8 @@ log4j-core - com.intel.daos - daos-java-api + io.daos + daos-java ${project.version} @@ -145,4 +145,4 @@ - \ No newline at end of file + diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/Constants.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/Constants.java similarity index 99% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/Constants.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/Constants.java index ff89e4514e6..070df6e8339 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/Constants.java +++ b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/Constants.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; /** * ALL configuration and value constants. 
diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosAbsFsImpl.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosAbsFsImpl.java similarity index 98% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosAbsFsImpl.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosAbsFsImpl.java index 23824eb9ffa..7b1f726df9a 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosAbsFsImpl.java +++ b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosAbsFsImpl.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.IOException; import java.net.URI; diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosConfig.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosConfig.java similarity index 99% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosConfig.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosConfig.java index 6e2c84c09f3..a38beeeb70a 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosConfig.java +++ b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosConfig.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. 
*/ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.*; import java.util.Iterator; diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosFileSystem.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosFileSystem.java similarity index 89% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosFileSystem.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosFileSystem.java index aef89a1494a..ef871877dcf 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosFileSystem.java +++ b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosFileSystem.java @@ -21,7 +21,7 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.FileNotFoundException; import java.io.IOException; @@ -29,7 +29,7 @@ import java.util.List; import com.google.common.collect.Lists; -import com.intel.daos.client.*; +import io.daos.dfs.*; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; @@ -60,56 +60,56 @@ * * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_POOL_UUID} + * {@value io.daos.fs.hadoop.Constants#DAOS_POOL_UUID} * * * true * UUID of DAOS pool * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_CONTAINER_UUID} + * {@value io.daos.fs.hadoop.Constants#DAOS_CONTAINER_UUID} * * * true * UUID od DAOS container which created with "--type posix" * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_READ_BUFFER_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#DEFAULT_DAOS_READ_BUFFER_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#MINIMUM_DAOS_READ_BUFFER_SIZE} - - * {@value com.intel.daos.hadoop.fs.Constants#MAXIMUM_DAOS_READ_BUFFER_SIZE} + * {@value io.daos.fs.hadoop.Constants#DAOS_READ_BUFFER_SIZE} + * {@value 
io.daos.fs.hadoop.Constants#DEFAULT_DAOS_READ_BUFFER_SIZE} + * {@value io.daos.fs.hadoop.Constants#MINIMUM_DAOS_READ_BUFFER_SIZE} - + * {@value io.daos.fs.hadoop.Constants#MAXIMUM_DAOS_READ_BUFFER_SIZE} * false * size of direct buffer for reading data from DAOS * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_WRITE_BUFFER_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#DEFAULT_DAOS_WRITE_BUFFER_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#MINIMUM_DAOS_WRITE_BUFFER_SIZE} - - * {@value com.intel.daos.hadoop.fs.Constants#MAXIMUM_DAOS_WRITE_BUFFER_SIZE} + * {@value io.daos.fs.hadoop.Constants#DAOS_WRITE_BUFFER_SIZE} + * {@value io.daos.fs.hadoop.Constants#DEFAULT_DAOS_WRITE_BUFFER_SIZE} + * {@value io.daos.fs.hadoop.Constants#MINIMUM_DAOS_WRITE_BUFFER_SIZE} - + * {@value io.daos.fs.hadoop.Constants#MAXIMUM_DAOS_WRITE_BUFFER_SIZE} * false * size of direct buffer for writing data to DAOS * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_BLOCK_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#DEFAULT_DAOS_BLOCK_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#MINIMUM_DAOS_BLOCK_SIZE} - - * {@value com.intel.daos.hadoop.fs.Constants#MAXIMUM_DAOS_BLOCK_SIZE} + * {@value io.daos.fs.hadoop.Constants#DAOS_BLOCK_SIZE} + * {@value io.daos.fs.hadoop.Constants#DEFAULT_DAOS_BLOCK_SIZE} + * {@value io.daos.fs.hadoop.Constants#MINIMUM_DAOS_BLOCK_SIZE} - + * {@value io.daos.fs.hadoop.Constants#MAXIMUM_DAOS_BLOCK_SIZE} * false * size for splitting large file into blocks when read by Hadoop * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_CHUNK_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#DEFAULT_DAOS_CHUNK_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#MINIMUM_DAOS_CHUNK_SIZE} - - * {@value com.intel.daos.hadoop.fs.Constants#MAXIMUM_DAOS_CHUNK_SIZE} + * {@value io.daos.fs.hadoop.Constants#DAOS_CHUNK_SIZE} + * {@value io.daos.fs.hadoop.Constants#DEFAULT_DAOS_CHUNK_SIZE} + * {@value io.daos.fs.hadoop.Constants#MINIMUM_DAOS_CHUNK_SIZE} 
- + * {@value io.daos.fs.hadoop.Constants#MAXIMUM_DAOS_CHUNK_SIZE} * false * size of DAOS file chunk * * - * {@value com.intel.daos.hadoop.fs.Constants#DAOS_PRELOAD_SIZE} - * {@value com.intel.daos.hadoop.fs.Constants#DEFAULT_DAOS_PRELOAD_SIZE} + * {@value io.daos.fs.hadoop.Constants#DAOS_PRELOAD_SIZE} + * {@value io.daos.fs.hadoop.Constants#DEFAULT_DAOS_PRELOAD_SIZE} * maximum is - * {@value com.intel.daos.hadoop.fs.Constants#MAXIMUM_DAOS_PRELOAD_SIZE} + * {@value io.daos.fs.hadoop.Constants#MAXIMUM_DAOS_PRELOAD_SIZE} * false * size for pre-loading more than requested data from DAOS into internal buffer when read * @@ -411,7 +411,7 @@ public FileStatus[] listStatus(Path f) throws IOException { } catch (IOException e) { if (e instanceof DaosIOException) { DaosIOException de = (DaosIOException) e; - if (de.getErrorCode() == com.intel.daos.client.Constants.ERROR_CODE_NOT_EXIST) { + if (de.getErrorCode() == io.daos.dfs.Constants.ERROR_CODE_NOT_EXIST) { throw new FileNotFoundException(e.getMessage()); } } @@ -436,7 +436,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { LOG.debug("DaosFileSystem mkdirs: Making directory = {} ", f.toUri().getPath()); } String key = f.toUri().getPath(); - daos.mkdir(key, com.intel.daos.client.Constants.FILE_DEFAULT_FILE_MODE, true); + daos.mkdir(key, io.daos.dfs.Constants.FILE_DEFAULT_FILE_MODE, true); return true; } diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosInputStream.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosInputStream.java similarity index 99% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosInputStream.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosInputStream.java index cb3556d893f..c8bfe37e186 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosInputStream.java +++ 
b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosInputStream.java @@ -21,13 +21,13 @@ * portions thereof marked with this legend must also reproduce the markings. */ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.EOFException; import java.io.IOException; import java.nio.ByteBuffer; -import com.intel.daos.client.DaosFile; +import io.daos.dfs.DaosFile; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosOutputStream.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosOutputStream.java similarity index 98% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosOutputStream.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosOutputStream.java index 8adea409bb5..97f3d2b6963 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/DaosOutputStream.java +++ b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/DaosOutputStream.java @@ -21,13 +21,13 @@ * portions thereof marked with this legend must also reproduce the markings. 
*/ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; -import com.intel.daos.client.DaosFile; +import io.daos.dfs.DaosFile; import org.apache.hadoop.fs.FileSystem; diff --git a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/package-info.java b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/package-info.java similarity index 79% rename from src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/package-info.java rename to src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/package-info.java index 1c3d2938a19..008ad54fe61 100644 --- a/src/client/java/hadoop-daos/src/main/java/com/intel/daos/hadoop/fs/package-info.java +++ b/src/client/java/hadoop-daos/src/main/java/io/daos/fs/hadoop/package-info.java @@ -25,7 +25,7 @@ * DAOS Implementation of Hadoop File System. * *
    - * To get instance of DAOS Implementation, {@link com.intel.daos.hadoop.fs.DaosFileSystem}, user just needs to make
    + * To get instance of DAOS Implementation, {@link io.daos.fs.hadoop.DaosFileSystem}, user just needs to make
      * below statements after proper hadoop configuration.
      * 
      * Configuration cfg = new Configuration();
    @@ -37,11 +37,11 @@
      * 
    * *

    - * Be noted the schema is {@link com.intel.daos.hadoop.fs.Constants#DAOS_SCHEMA} + * Be noted the schema is {@link io.daos.fs.hadoop.Constants#DAOS_SCHEMA} * *

    - * For hadoop configuration, please refer {@linkplain com.intel.daos.hadoop.fs.DaosFileSystem DaosFileSystem} + * For hadoop configuration, please refer {@linkplain io.daos.fs.hadoop.DaosFileSystem DaosFileSystem} * - * @see com.intel.daos.hadoop.fs.DaosFileSystem + * @see io.daos.fs.hadoop.DaosFileSystem */ -package com.intel.daos.hadoop.fs; \ No newline at end of file +package io.daos.fs.hadoop; \ No newline at end of file diff --git a/src/client/java/hadoop-daos/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/src/client/java/hadoop-daos/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem index 758e4f20111..61daad6f9a0 100644 --- a/src/client/java/hadoop-daos/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/src/client/java/hadoop-daos/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem @@ -19,4 +19,4 @@ # Any reproduction of computer software, computer software documentation, or # portions thereof marked with this legend must also reproduce the markings. 
-com.intel.daos.hadoop.fs.DaosFileSystem \ No newline at end of file +io.daos.fs.hadoop.DaosFileSystem \ No newline at end of file diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosConfigTest.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosConfigTest.java similarity index 99% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosConfigTest.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosConfigTest.java index 8d14feea6dd..6c0afb6b3a8 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosConfigTest.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosConfigTest.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.File; import java.lang.reflect.Constructor; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFSFactory.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFSFactory.java similarity index 88% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFSFactory.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFSFactory.java index 6256eece682..79e4fcba60f 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFSFactory.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFSFactory.java @@ -1,7 +1,7 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; -import com.intel.daos.client.DaosFile; -import com.intel.daos.client.DaosFsClient; +import io.daos.dfs.DaosFile; +import io.daos.dfs.DaosFsClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -14,8 +14,8 @@ public class DaosFSFactory { // public final static String pooluuid = "53a47469-ea2a-418e-89d3-6d1df1aaadb4"; // public final static String contuuid = 
"9e60aff2-ca28-45fe-bdb0-d1a6c182c342"; - public final static String defaultPoolId = "6614df7a-a13f-4f7b-aa1e-0ee3c6804456"; - public final static String defaultContId = "fe258219-5c5f-4f48-bcd4-143996a770ce"; + public final static String defaultPoolId = "0417107c-144e-4394-a7f1-a281d0251b0c"; + public final static String defaultContId = "71bfbb65-5de6-4f85-88a5-e1a8b33af335"; public final static String pooluuid = System.getProperty("pool_id", defaultPoolId); public final static String contuuid = System.getProperty("cont_id", defaultContId); public final static String svc = "0"; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemContractIT.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemContractIT.java similarity index 99% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemContractIT.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemContractIT.java index 19b09f53617..bed4b7805e6 100755 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemContractIT.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemContractIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemIT.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemIT.java similarity index 98% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemIT.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemIT.java index 4a78987a992..c3a13726181 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemIT.java +++ 
b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemTest.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemTest.java similarity index 98% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemTest.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemTest.java index ef1b2d926e8..d21c6ab33aa 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosFileSystemTest.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosFileSystemTest.java @@ -1,6 +1,6 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; -import com.intel.daos.client.DaosFsClient; +import io.daos.dfs.DaosFsClient; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -23,7 +23,7 @@ @RunWith(PowerMockRunner.class) @PowerMockIgnore("javax.management.*") @PrepareForTest({DaosFsClient.DaosFsClientBuilder.class, DaosFileSystem.class}) -@SuppressStaticInitializationFor("com.intel.daos.client.DaosFsClient") +@SuppressStaticInitializationFor("io.daos.dfs.DaosFsClient") public class DaosFileSystemTest { @Test diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosInputStreamIT.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosInputStreamIT.java similarity index 99% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosInputStreamIT.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosInputStreamIT.java index 69b9f1e7ada..fddef30c54b 100644 --- 
a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosInputStreamIT.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosInputStreamIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosInputStreamTest.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosInputStreamTest.java similarity index 99% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosInputStreamTest.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosInputStreamTest.java index 27d4f707f72..3973ae9ea7a 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosInputStreamTest.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosInputStreamTest.java @@ -1,6 +1,6 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; -import com.intel.daos.client.DaosFile; +import io.daos.dfs.DaosFile; import org.apache.hadoop.fs.FileSystem; import org.junit.Assert; import org.junit.Test; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosOutputStreamIT.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosOutputStreamIT.java similarity index 98% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosOutputStreamIT.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosOutputStreamIT.java index c35b83a375d..8bee2902eb4 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosOutputStreamIT.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosOutputStreamIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.Path; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosOutputStreamTest.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosOutputStreamTest.java similarity index 98% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosOutputStreamTest.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosOutputStreamTest.java index 5599af48b86..2eed5e1c9a9 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosOutputStreamTest.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosOutputStreamTest.java @@ -1,6 +1,6 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; -import com.intel.daos.client.DaosFile; +import io.daos.dfs.DaosFile; import org.apache.hadoop.fs.FileSystem; import org.junit.Assert; import org.junit.Test; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosUtils.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosUtils.java similarity index 97% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosUtils.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosUtils.java index ff8caca1419..1e2de45ab7f 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/DaosUtils.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/DaosUtils.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/HadoopCmdIT.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/HadoopCmdIT.java similarity index 87% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/HadoopCmdIT.java rename to 
src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/HadoopCmdIT.java index d0f2954af62..943b3a6f922 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/HadoopCmdIT.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/HadoopCmdIT.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs; +package io.daos.fs.hadoop; import java.io.ByteArrayOutputStream; import java.io.PrintStream; @@ -64,10 +64,16 @@ public void listRoot() throws Exception { @Test public void testMkdir() throws Exception { - String[] argv = new String[]{"-mkdir", "/job_1581472776049_0003-1581473346405-" + + String filePath = "/job_1581472776049_0003-1581473346405-" + "root-autogen%2D7.1%2DSNAPSHOT%2Djar%2Dwith%2Ddependencies.jar-" + - "1581473454525-16-1-SUCCEEDED-default-1581473439146.jhist_tmp"}; + "1581473454525-16-1-SUCCEEDED-default-1581473439146.jhist_tmp"; + String[] argv = new String[]{"-rm", "-r", filePath}; int res = run(argv); + Assert.assertTrue(res == 0); + + argv = new String[]{"-mkdir", filePath}; + res = run(argv); + Assert.assertTrue(res == 0); } @AfterClass diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/MultipleDaosOpenFileIT.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/MultipleDaosOpenFileIT.java similarity index 94% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/MultipleDaosOpenFileIT.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/MultipleDaosOpenFileIT.java index c11a3335d9b..02336577097 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/MultipleDaosOpenFileIT.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/MultipleDaosOpenFileIT.java @@ -1,6 +1,6 @@ -package com.intel.daos.hadoop.fs.multiple; +package io.daos.fs.hadoop.multiple; -import com.intel.daos.hadoop.fs.DaosFSFactory; +import 
io.daos.fs.hadoop.DaosFSFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Before; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosCreateFile.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosCreateFile.java similarity index 99% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosCreateFile.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosCreateFile.java index 819cd38a9e3..de4bc9feaf7 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosCreateFile.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosCreateFile.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs.multiple; +package io.daos.fs.hadoop.multiple; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosMkdir.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosMkdir.java similarity index 99% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosMkdir.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosMkdir.java index d185f35cac5..c0d905ef9d0 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosMkdir.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosMkdir.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs.multiple; +package io.daos.fs.hadoop.multiple; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git 
a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosMount.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosMount.java similarity index 97% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosMount.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosMount.java index c20ae10d8c1..76a8ccad72e 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/multiple/TestMultipleDaosMount.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/multiple/TestMultipleDaosMount.java @@ -1,6 +1,6 @@ -package com.intel.daos.hadoop.fs.multiple; +package io.daos.fs.hadoop.multiple; -//import com.intel.daos.hadoop.fs.CreateDaosFS; +//import io.daos.fs.hadoop.CreateDaosFS; import org.apache.hadoop.fs.FileSystem; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/perf/Main.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/perf/Main.java similarity index 98% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/perf/Main.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/perf/Main.java index 53cf1d8c7ad..e09653064c3 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/perf/Main.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/perf/Main.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs.perf; +package io.daos.fs.hadoop.perf; import java.io.BufferedReader; import java.io.File; @@ -10,10 +10,10 @@ import java.util.Random; import java.util.concurrent.*; -import com.intel.daos.client.DaosFile; -import com.intel.daos.client.DaosFsClient; -import com.intel.daos.hadoop.fs.Constants; -import com.intel.daos.hadoop.fs.DaosConfig; +import io.daos.dfs.DaosFile; +import 
io.daos.dfs.DaosFsClient; +import io.daos.fs.hadoop.Constants; +import io.daos.fs.hadoop.DaosConfig; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/perf/Test.java b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/perf/Test.java similarity index 94% rename from src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/perf/Test.java rename to src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/perf/Test.java index 7b2a2900b0c..77f598faa6c 100644 --- a/src/client/java/hadoop-daos/src/test/java/com/intel/daos/hadoop/fs/perf/Test.java +++ b/src/client/java/hadoop-daos/src/test/java/io/daos/fs/hadoop/perf/Test.java @@ -1,4 +1,4 @@ -package com.intel.daos.hadoop.fs.perf; +package io.daos.fs.hadoop.perf; import java.util.Random; diff --git a/src/client/java/hadoop-daos/src/test/resources/java-test.sh b/src/client/java/hadoop-daos/src/test/resources/java-test.sh index e94642e75c1..d504abc89c8 100644 --- a/src/client/java/hadoop-daos/src/test/resources/java-test.sh +++ b/src/client/java/hadoop-daos/src/test/resources/java-test.sh @@ -16,7 +16,7 @@ then java -Xmx2048m -cp ./hadoop-daos-0.0.1-SNAPSHOT-shaded.jar:\ ./hadoop-daos-0.0.1-SNAPSHOT-tests.jar "$props" \ -com.intel.daos.hadoop.fs.perf.Main "$first" 1>"$out" 2>"$err" +io.daos.fs.hadoop.perf.Main "$first" 1>"$out" 2>"$err" else props="$*" @@ -24,6 +24,6 @@ else java -Xmx2048m -cp ./hadoop-daos-0.0.1-SNAPSHOT-shaded.jar:\ ./hadoop-daos-0.0.1-SNAPSHOT-tests.jar "$props" \ -com.intel.daos.hadoop.fs.perf.Main "$first" 1>stdout 2>stderr +io.daos.fs.hadoop.perf.Main "$first" 1>stdout 2>stderr fi diff --git a/src/client/java/pom.xml b/src/client/java/pom.xml index a53bd03a5a2..eb91a1ee755 100644 --- a/src/client/java/pom.xml +++ b/src/client/java/pom.xml @@ -12,13 +12,13 @@ 4.0.0 - com.intel.daos + io.daos daos-java-root - 
0.0.1-SNAPSHOT + 1.1.0-SNAPSHOT pom DAOS Java Root POM - Java API for DAOS Object and DAOS FS + Java Root POM for DAOS http://daos.io @@ -310,7 +310,7 @@ - daos-java-api + daos-java hadoop-daos distribution diff --git a/src/client/pydaos/raw/daos_api.py b/src/client/pydaos/raw/daos_api.py index 864cf0d1320..f17b43c181e 100644 --- a/src/client/pydaos/raw/daos_api.py +++ b/src/client/pydaos/raw/daos_api.py @@ -1371,7 +1371,6 @@ def __init__(self): self.chksum_type = ctypes.c_uint64(100) self.chunk_size = ctypes.c_uint64(0) - class DaosInputParams(object): # pylint: disable=too-few-public-methods """ This is a helper python method @@ -1397,7 +1396,6 @@ def get_con_create_params(self): """ return self.co_prop - class DaosContainer(object): """A python object representing a DAOS container.""" @@ -1468,13 +1466,14 @@ def create(self, poh, con_uuid=None, con_prop=None, cb_func=None): if self.cont_input_values.type != "Unknown": self.cont_prop.dpp_entries[idx].dpe_type = ctypes.c_uint32( DaosContPropEnum.DAOS_PROP_CO_LAYOUT_TYPE.value) - if self.cont_input_values.type == "posix": + if self.cont_input_values.type in ("posix", "POSIX"): self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( DaosContPropEnum.DAOS_PROP_CO_LAYOUT_POSIX.value) elif self.cont_input_values.type == "hdf5": self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( DaosContPropEnum.DAOS_PROP_CO_LAYOUT_HDF5.value) else: + # TODO: This should ideally fail. 
self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( DaosContPropEnum.DAOS_PROP_CO_LAYOUT_UNKOWN.value) idx = idx + 1 diff --git a/src/control/SConscript b/src/control/SConscript index 78ae98887bb..5e3d2e0acff 100644 --- a/src/control/SConscript +++ b/src/control/SConscript @@ -141,7 +141,7 @@ def scons(): cartpath = Dir('#/src/cart/src/include').srcnode().abspath denv.AppendENVPath("CGO_LDFLAGS", - denv.subst("-L$BUILD_DIR/src/cart/src/gurt")) + denv.subst("-L$BUILD_DIR/src/cart/src/gurt"), sep=" ") denv.AppendENVPath("CGO_CFLAGS", "-I%s" % cartpath, sep=" ") agentbin = install_go_bin(denv, gosrc, None, "agent", "daos_agent") @@ -159,9 +159,8 @@ def scons(): "-L$BUILD_DIR/src/cart/src/gurt " "-L$BUILD_DIR/src/cart/src/cart " "-L$PREFIX/lib64 -L$SPDK_PREFIX/lib " - "-L$HWLOC_PREFIX/lib -L$OFI_PREFIX/lib " - "-L$ISAL_PREFIX/lib $_RPATH") - # Explicitly link RTE & SPDK libs for CGO access. + "-L$OFI_PREFIX/lib -L$ISAL_PREFIX/lib $_RPATH") + # Explicitly link RTE & SPDK libs for CGO access (need for clang build). 
senv.AppendENVPath("CGO_LDFLAGS", cgolibdirs + \ " -lspdk_env_dpdk -lspdk_thread -lspdk_bdev" + \ " -lspdk_copy -lrte_mempool -lrte_mempool_ring" + \ @@ -175,7 +174,6 @@ def scons(): senv.AppendENVPath("CGO_CFLAGS", senv.subst("-I$SPDK_PREFIX/include " - "-I$HWLOC_PREFIX/include " "-I$OFI_PREFIX/include"), sep=" ") diff --git a/src/control/lib/spdk/SConscript b/src/control/lib/spdk/SConscript index 8f015e88816..54ebb8ba43c 100644 --- a/src/control/lib/spdk/SConscript +++ b/src/control/lib/spdk/SConscript @@ -13,12 +13,17 @@ def scons(): senv.AppendUnique(CPPPATH=[join(Dir('.').srcnode().abspath, "include")]) - # Link to DPDK static libs - senv.AppendUnique(LINKFLAGS=['-Wl,--whole-archive', \ - '-lrte_mempool', '-lrte_mempool_ring', \ - '-lrte_bus_pci', '-lrte_pci', '-lrte_ring', \ - '-lrte_mbuf', '-lrte_eal', '-lrte_kvargs', \ - '-Wl,--no-whole-archive']) + # SPDK related libs + libs = ['spdk_env_dpdk', 'spdk_thread', 'spdk_bdev', 'spdk_copy'] + libs += ['rte_mempool', 'rte_mempool_ring', 'rte_bus_pci'] + libs += ['rte_pci', 'rte_ring', 'rte_mbuf', 'rte_eal', 'rte_kvargs'] + libs += ['spdk_bdev_aio', 'spdk_bdev_nvme', 'spdk_bdev_malloc'] + libs += ['spdk_conf', 'spdk_blob', 'spdk_nvme', 'spdk_util'] + libs += ['spdk_json', 'spdk_jsonrpc', 'spdk_rpc', 'spdk_trace'] + libs += ['spdk_sock', 'spdk_log', 'spdk_notify', 'spdk_blob_bdev'] + + # Other libs + libs += ['numa', 'dl', 'isal'] # hack to avoid building this library with cov compiler for the moment compiler = senv.get('COMPILER').lower() @@ -28,9 +33,7 @@ def scons(): senv.nc = senv.Object("src/nvme_control.c") senv.ncc = senv.Object("src/nvme_control_common.c") denv.nvmecontrol = senv.StaticLibrary("nvme_control", [senv.nc, senv.ncc], - CC=compiler, LIBS=['spdk', 'numa', - 'spdk_env_dpdk', - 'isal']) + CC=compiler, LIBS=libs) senv.Install(join(senv.subst("$PREFIX"), "lib64"), denv.nvmecontrol) diff --git a/src/control/lib/spdk/ctests/SConscript b/src/control/lib/spdk/ctests/SConscript index 
45328868af4..743c826379e 100644 --- a/src/control/lib/spdk/ctests/SConscript +++ b/src/control/lib/spdk/ctests/SConscript @@ -13,14 +13,24 @@ def scons(): unit_env.AppendUnique(CPPPATH=["$SPDK_SRC/lib/nvme", "$SPDK_SRC/include"]) + # SPDK related libs + libs = ['spdk_env_dpdk', 'spdk_thread', 'spdk_bdev', 'spdk_copy'] + libs += ['rte_mempool', 'rte_mempool_ring', 'rte_bus_pci'] + libs += ['rte_pci', 'rte_ring', 'rte_mbuf', 'rte_eal', 'rte_kvargs'] + libs += ['spdk_bdev_aio', 'spdk_bdev_nvme', 'spdk_bdev_malloc'] + libs += ['spdk_conf', 'spdk_blob', 'spdk_nvme', 'spdk_util'] + libs += ['spdk_json', 'spdk_jsonrpc', 'spdk_rpc', 'spdk_trace'] + libs += ['spdk_sock', 'spdk_log', 'spdk_notify', 'spdk_blob_bdev'] + + # Other libs + libs += ['numa', 'dl', 'isal', 'cmocka', 'pthread'] + config = Configure(unit_env) if config.CheckHeader("nvme_internal.h"): testbin = daos_build.test(unit_env, 'nvme_control_ctests', ['nvme_control_ut.c', unit_env.ncc, - unit_env.nc], - LIBS=['spdk', 'isal', 'spdk_env_dpdk', 'numa', - 'cmocka', 'pthread', 'dl']) + unit_env.nc], LIBS=libs) unit_env.Install("$PREFIX/bin", testbin) else: print("SPDK nvme_internal.h missing, skipping nvme_control_ut build") diff --git a/src/dtx/dtx_common.c b/src/dtx/dtx_common.c index f72074bba61..0c1cf814b0b 100644 --- a/src/dtx/dtx_common.c +++ b/src/dtx/dtx_common.c @@ -1,5 +1,5 @@ /** - * (C) Copyright 2019 Intel Corporation. + * (C) Copyright 2019-2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -96,7 +96,6 @@ dtx_flush_on_deregister(struct dss_module_info *dmi, { struct ds_cont_child *cont = dbca->dbca_cont; struct ds_pool_child *pool = cont->sc_pool; - ABT_future future = dbca->dbca_deregistering; int rc; D_ASSERT(dbca->dbca_deregistering != NULL); @@ -123,7 +122,7 @@ dtx_flush_on_deregister(struct dss_module_info *dmi, * flush done, then free the dbca. 
*/ d_list_del_init(&dbca->dbca_link); - rc = ABT_future_set(future, NULL); + rc = ABT_future_set(dbca->dbca_deregistering, NULL); D_ASSERTF(rc == ABT_SUCCESS, "ABT_future_set failed for DTX " "flush on "DF_UUID": rc = %d\n", DP_UUID(cont->sc_uuid), rc); } @@ -586,7 +585,7 @@ dtx_leader_end(struct dtx_leader_handle *dlh, struct ds_cont_child *cont, dth->dth_renew = 1; } - if (result < 0 || rc < 0) + if (result < 0 || rc < 0 || !dth->dth_actived) D_GOTO(out, result = result < 0 ? result : rc); if (dth->dth_intent == DAOS_INTENT_PUNCH) diff --git a/src/dtx/dtx_rpc.c b/src/dtx/dtx_rpc.c index 1329cca32b1..f0d5400ab93 100644 --- a/src/dtx/dtx_rpc.c +++ b/src/dtx/dtx_rpc.c @@ -1,5 +1,5 @@ /** - * (C) Copyright 2019 Intel Corporation. + * (C) Copyright 2019-2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -241,18 +241,18 @@ dtx_req_list_cb(void **args) } else { for (i = 0; i < dra->dra_length; i++) { drr = args[i]; - if (dra->dra_result == 0) + if ((drr->drr_result < 0) && + (dra->dra_result == 0 || + dra->dra_result == -DER_NONEXIST)) dra->dra_result = drr->drr_result; - - if (dra->dra_result != 0) { - D_ERROR("DTX req for opc %x failed: rc = %d.\n", - dra->dra_opc, dra->dra_result); - return; - } } - D_DEBUG(DB_TRACE, "DTX req for opc %x succeed.\n", - dra->dra_opc); + drr = args[0]; + D_CDEBUG(dra->dra_result < 0, DLOG_ERR, DB_TRACE, + "DTX req for opc %x ("DF_DTI") %s, count %d: %d.\n", + dra->dra_opc, DP_DTI(drr->drr_dti), + dra->dra_result < 0 ? "failed" : "succeed", + dra->dra_length, dra->dra_result); } } @@ -611,13 +611,20 @@ dtx_commit(uuid_t po_uuid, uuid_t co_uuid, struct dtx_entry *dtes, } rc1 = vos_dtx_commit(cont->sc_hdl, dti, count); + /* -DER_NONEXIST may be caused by race or repeated commit, ignore it. 
*/ + if (rc1 == -DER_NONEXIST) + rc1 = 0; - if (dra.dra_future != ABT_FUTURE_NULL) + if (dra.dra_future != ABT_FUTURE_NULL) { rc2 = dtx_req_wait(&dra); + if (rc2 == -DER_NONEXIST) + rc2 = 0; + } out: - D_DEBUG(DB_TRACE, "Commit DTXs "DF_DTI", count %d: rc %d %d %d\n", - DP_DTI(&dtes[0].dte_xid), count, rc, rc1, rc2); + D_CDEBUG(rc != 0 || rc1 != 0 || rc2 != 0, DLOG_ERR, DB_TRACE, + "Commit DTXs "DF_DTI", count %d: rc %d %d %d\n", + DP_DTI(&dtes[0].dte_xid), count, rc, rc1, rc2); if (dti != NULL) D_FREE(dti); @@ -630,7 +637,7 @@ dtx_commit(uuid_t po_uuid, uuid_t co_uuid, struct dtx_entry *dtes, if (cont != NULL) ds_cont_child_put(cont); - return rc > 0 ? 0 : rc; + return rc < 0 ? rc : (rc1 < 0 ? rc1 : (rc2 < 0 ? rc2 : 0)); } int @@ -680,12 +687,16 @@ dtx_abort(uuid_t po_uuid, uuid_t co_uuid, daos_epoch_t epoch, if (rc1 == -DER_NONEXIST) rc1 = 0; - if (dra.dra_future != ABT_FUTURE_NULL) + if (dra.dra_future != ABT_FUTURE_NULL) { rc2 = dtx_req_wait(&dra); + if (rc2 == -DER_NONEXIST) + rc2 = 0; + } out: - D_DEBUG(DB_TRACE, "Abort DTXs "DF_DTI", count %d: rc %d %d %d\n", - DP_DTI(&dtes[0].dte_xid), count, rc, rc1, rc2); + D_CDEBUG(rc != 0 || rc1 != 0 || rc2 != 0, DLOG_ERR, DB_TRACE, + "Abort DTXs "DF_DTI", count %d: rc %d %d %d\n", + DP_DTI(&dtes[0].dte_xid), count, rc, rc1, rc2); if (dti != NULL) D_FREE(dti); @@ -698,7 +709,7 @@ dtx_abort(uuid_t po_uuid, uuid_t co_uuid, daos_epoch_t epoch, if (cont != NULL) ds_cont_child_put(cont); - return rc > 0 ? 0 : rc; + return rc < 0 ? rc : (rc1 < 0 ? rc1 : (rc2 < 0 ? rc2 : 0)); } int diff --git a/src/include/daos/event.h b/src/include/daos/event.h index bb9d1d44dc8..3e470ee9b86 100644 --- a/src/include/daos/event.h +++ b/src/include/daos/event.h @@ -54,7 +54,6 @@ enum daos_ev_flags { struct tse_task_t; typedef int (*daos_event_comp_cb_t)(void *, daos_event_t *, int); -extern bool dfs_no_cond_op; /** * Finish event queue library. 
diff --git a/src/include/daos_api.h b/src/include/daos_api.h index 64929f44060..3e9506453ff 100644 --- a/src/include/daos_api.h +++ b/src/include/daos_api.h @@ -1,5 +1,5 @@ /* - * (C) Copyright 2015-2019 Intel Corporation. + * (C) Copyright 2015-2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -123,6 +123,8 @@ daos_tx_abort(daos_handle_t th, daos_event_t *ev); * involved. * * \param[in] th Transaction handle to free. + * \param[in] ev Completion event, it is optional and can be NULL. + * The function will run in blocking mode if \a ev is NULL. * * \return 0 if Success, negative if failed. */ @@ -133,7 +135,7 @@ daos_tx_close(daos_handle_t th, daos_event_t *ev); * Return epoch associated with the transaction handle. * * \param[in] th Transaction handle. - * \param[out] th Returned epoch value. + * \param[out] epoch Returned epoch value. * * \return 0 if Success, negative if failed. 
*/ diff --git a/src/include/daos_array.h b/src/include/daos_array.h index 1f48ebdc943..52867b2507d 100644 --- a/src/include/daos_array.h +++ b/src/include/daos_array.h @@ -146,11 +146,8 @@ daos_array_generate_id(daos_obj_id_t *oid, daos_oclass_id_t cid, bool add_attr, * 0 Success * -DER_NO_HDL Invalid container handle * -DER_INVAL Invalid parameter - * -DER_UNREACH Network is unreachable * -DER_NO_PERM Permission denied - * -DER_NONEXIST Cannot find object - * -DER_EP_OLD Epoch is too old and has no data for - * this object + * -DER_UNREACH Network is unreachable */ DAOS_API int daos_array_create(daos_handle_t coh, daos_obj_id_t oid, daos_handle_t th, @@ -180,11 +177,9 @@ daos_array_create(daos_handle_t coh, daos_obj_id_t oid, daos_handle_t th, * 0 Success * -DER_NO_HDL Invalid container handle * -DER_INVAL Invalid parameter - * -DER_UNREACH Network is unreachable * -DER_NO_PERM Permission denied * -DER_NONEXIST Cannot find object - * -DER_EP_OLD Epoch is too old and has no data for - * this object + * -DER_UNREACH Network is unreachable */ DAOS_API int daos_array_open(daos_handle_t coh, daos_obj_id_t oid, daos_handle_t th, @@ -203,9 +198,9 @@ daos_array_open(daos_handle_t coh, daos_obj_id_t oid, daos_handle_t th, * be set to DAOS_OF_DKEY_UINT64 | DAOS_OF_KV_FLAT. * \param[in] th Transaction handle. * \param[in] mode Open mode: DAOS_OO_RO/RW - * \param[out] cell_size + * \param[in] cell_size * Record size of the array. - * \param[out] chunk_size + * \param[in] chunk_size * Contiguous bytes to store per DKey before moving to a * differen dkey. * \param[out] oh Returned array object open handle. 
@@ -217,11 +212,7 @@ daos_array_open(daos_handle_t coh, daos_obj_id_t oid, daos_handle_t th, * 0 Success * -DER_NO_HDL Invalid container handle * -DER_INVAL Invalid parameter - * -DER_UNREACH Network is unreachable * -DER_NO_PERM Permission denied - * -DER_NONEXIST Cannot find object - * -DER_EP_OLD Epoch is too old and has no data for - * this object */ DAOS_API int daos_array_open_with_attr(daos_handle_t coh, daos_obj_id_t oid, @@ -309,7 +300,6 @@ daos_array_close(daos_handle_t oh, daos_event_t *ev); * -DER_UNREACH Network is unreachable * -DER_REC2BIG Record is too large and can't be * fit into output buffer - * -DER_EP_OLD Epoch is too old and has no data */ DAOS_API int daos_array_read(daos_handle_t oh, daos_handle_t th, daos_array_iod_t *iod, @@ -335,7 +325,6 @@ daos_array_read(daos_handle_t oh, daos_handle_t th, daos_array_iod_t *iod, * -DER_UNREACH Network is unreachable * -DER_REC2BIG Record is too large and can't be * fit into output buffer - * -DER_EP_OLD Epoch is too old and has no data */ DAOS_API int daos_array_write(daos_handle_t oh, daos_handle_t th, daos_array_iod_t *iod, @@ -350,7 +339,12 @@ daos_array_write(daos_handle_t oh, daos_handle_t th, daos_array_iod_t *iod, * \param[in] ev Completion event, it is optional and can be NULL. * Function will run in blocking mode if \a ev is NULL. * - * \return 0 on Success, negative on failure. + * \return These values will be returned by \a ev::ev_error in + * non-blocking mode: + * 0 Success + * -DER_NO_HDL Invalid object open handle + * -DER_INVAL Invalid parameter + * -DER_UNREACH Network is unreachable */ DAOS_API int daos_array_get_size(daos_handle_t oh, daos_handle_t th, daos_size_t *size, @@ -368,7 +362,12 @@ daos_array_get_size(daos_handle_t oh, daos_handle_t th, daos_size_t *size, * \param[in] ev Completion event, it is optional and can be NULL. * Function will run in blocking mode if \a ev is NULL. * - * \return 0 on Success, negative on failure. 
+ * \return These values will be returned by \a ev::ev_error in + * non-blocking mode: + * 0 Success + * -DER_NO_HDL Invalid object open handle + * -DER_INVAL Invalid parameter + * -DER_UNREACH Network is unreachable */ DAOS_API int daos_array_set_size(daos_handle_t oh, daos_handle_t th, daos_size_t size, @@ -388,7 +387,12 @@ daos_array_set_size(daos_handle_t oh, daos_handle_t th, daos_size_t size, * \param[in] ev Completion event, it is optional and can be NULL. * Function will run in blocking mode if \a ev is NULL. * - * \return 0 on Success, negative on failure. + * \return These values will be returned by \a ev::ev_error in + * non-blocking mode: + * 0 Success + * -DER_NO_HDL Invalid object open handle + * -DER_INVAL Invalid parameter + * -DER_UNREACH Network is unreachable */ DAOS_API int daos_array_destroy(daos_handle_t oh, daos_handle_t th, daos_event_t *ev); @@ -402,7 +406,12 @@ daos_array_destroy(daos_handle_t oh, daos_handle_t th, daos_event_t *ev); * \param[in] ev Completion event, it is optional and can be NULL. * Function will run in blocking mode if \a ev is NULL. * - * \return 0 on Success, negative on failure. + * \return These values will be returned by \a ev::ev_error in + * non-blocking mode: + * 0 Success + * -DER_NO_HDL Invalid object open handle + * -DER_INVAL Invalid parameter + * -DER_UNREACH Network is unreachable */ DAOS_API int daos_array_punch(daos_handle_t oh, daos_handle_t th, daos_array_iod_t *iod, @@ -417,7 +426,11 @@ daos_array_punch(daos_handle_t oh, daos_handle_t th, daos_array_iod_t *iod, * \param[out] cell_size * Cell size of the array. * - * \return 0 on Success, negative on failure. 
+ * \return These values will be returned by \a ev::ev_error in + * non-blocking mode: + * 0 Success + * -DER_NO_HDL Invalid object open handle + * -DER_INVAL Invalid parameter */ DAOS_API int daos_array_get_attr(daos_handle_t oh, daos_size_t *chunk_size, diff --git a/src/include/daos_cont.h b/src/include/daos_cont.h index 5708bc96897..7935fc88f2c 100644 --- a/src/include/daos_cont.h +++ b/src/include/daos_cont.h @@ -496,7 +496,7 @@ daos_cont_aggregate(daos_handle_t coh, daos_epoch_t epoch, daos_event_t *ev); * Rollback to a specific persistent snapshot. * * \param[in] coh Container handle - * \param[in] epoch Epoch if persistent snapshot to rollback to. + * \param[in] epoch Epoch of a persistent snapshot to rollback to. * \param[in] ev Completion event, it is optional and can be NULL. * The function will run in blocking mode if \a ev is NULL. */ diff --git a/src/include/daos_event.h b/src/include/daos_event.h index e5b3a435872..c423137dd0c 100644 --- a/src/include/daos_event.h +++ b/src/include/daos_event.h @@ -1,5 +1,5 @@ /** - * (C) Copyright 2015, 2016 Intel Corporation. + * (C) Copyright 2015 - 2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -143,7 +143,10 @@ daos_eq_query(daos_handle_t eqh, daos_eq_query_t query, * just an easy way to combine multiple events completion * status into 1. 
* - * \return Zero on success, negative value if error + * \return 0 Success + * -DER_INVAL Invalid parameter + * -DER_NO_PERM Permission denied + * -DER_NONEXIST Event Queue does not exist */ DAOS_API int daos_event_init(daos_event_t *ev, daos_handle_t eqh, daos_event_t *parent); @@ -158,7 +161,9 @@ daos_event_init(daos_event_t *ev, daos_handle_t eqh, daos_event_t *parent); * * \param ev [IN] Event to finalize * - * \return Zero on success, negative value if error + * \return 0 Success + * -DER_INVAL Invalid parameter + * -DER_NONEXIST Event Queue does not exist */ DAOS_API int daos_event_fini(daos_event_t *ev); @@ -188,7 +193,11 @@ daos_event_next(daos_event_t *parent, daos_event_t *child); * \param flag [OUT] returned state of the event. true if the event is * finished (completed or aborted), false if in-flight. * - * \return Zero on success, negative value if error + * \return 0 Success + * -DER_INVAL Invalid parameter + * -DER_NO_PERM Permission denied + * -DER_NONEXIST Event Queue does not exist + * negative rc of associated operation of the event. 
*/ DAOS_API int daos_event_test(struct daos_event *ev, int64_t timeout, bool *flag); @@ -208,7 +217,10 @@ daos_event_test(struct daos_event *ev, int64_t timeout, bool *flag); * * \param ev [IN] Parent event * - * \return Zero on success, negative value if error + * \return 0 Success + * -DER_INVAL Invalid parameter + * -DER_NO_PERM Permission denied + * -DER_NONEXIST Event Queue does not exist */ DAOS_API int daos_event_parent_barrier(struct daos_event *ev); @@ -219,7 +231,9 @@ daos_event_parent_barrier(struct daos_event *ev); * * \param ev [IN] Event (operation) to abort * - * \return Zero on success, negative value if error + * \return 0 Success + * -DER_INVAL Invalid parameter + * -DER_NO_PERM Permission denied */ DAOS_API int daos_event_abort(daos_event_t *ev); diff --git a/src/include/daos_fs.h b/src/include/daos_fs.h index 510648b8211..fc17b8541fe 100644 --- a/src/include/daos_fs.h +++ b/src/include/daos_fs.h @@ -1,5 +1,5 @@ /* - * (C) Copyright 2018-2019 Intel Corporation. + * (C) Copyright 2018-2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -39,22 +39,25 @@ extern "C" { #include +/** Maximum Path length */ #define DFS_MAX_PATH NAME_MAX +/** Maximum file size */ #define DFS_MAX_FSIZE (~0ULL) +/** File/Directory/Symlink object handle struct */ typedef struct dfs_obj dfs_obj_t; +/** DFS mount handle struct */ typedef struct dfs dfs_t; +/** struct holding attributes for a DFS container */ typedef struct { - /* - * User ID for DFS container (Optional); can be mapped to a Lustre FID - * for example in the Unified namespace. - */ + /** Optional user ID for DFS container. 
*/ uint64_t da_id; /** Default Chunk size for all files in container */ daos_size_t da_chunk_size; /** Default Object Class for all objects in the container */ daos_oclass_id_t da_oclass_id; + /** DAOS properties on the DFS container */ daos_prop_t *da_props; } dfs_attr_t; @@ -221,7 +224,7 @@ dfs_lookup(dfs_t *dfs, const char *path, int flags, dfs_obj_t **obj, */ DAOS_API int dfs_lookup_rel(dfs_t *dfs, dfs_obj_t *parent, const char *name, int flags, - dfs_obj_t **_obj, mode_t *mode, struct stat *stbuf); + dfs_obj_t **obj, mode_t *mode, struct stat *stbuf); /** * Create/Open a directory, file, or Symlink. @@ -327,6 +330,7 @@ dfs_read(dfs_t *dfs, dfs_obj_t *obj, d_sg_list_t *sgl, daos_off_t off, daos_size_t *read_size, daos_event_t *ev); /** + * Non-contiguous read interface to a DFS file. * Same as dfs_read with the ability to have a segmented file layout to read. * * \param[in] dfs Pointer to the mounted file system. @@ -361,7 +365,7 @@ dfs_write(dfs_t *dfs, dfs_obj_t *obj, d_sg_list_t *sgl, daos_off_t off, daos_event_t *ev); /** - * Write data to the file object. + * Non-contiguous write interface to a DFS file. * * \param[in] dfs Pointer to the mounted file system. * \param[in] obj Opened file object. @@ -496,7 +500,8 @@ dfs_remove(dfs_t *dfs, dfs_obj_t *parent, const char *name, bool force, * \param[in] name Link name of object. * \param[in] new_parent * Target parent directory object. If NULL, use root obj. - * \param[in] name New link name of object. + * \param[in] new_name + * New link name of object. * \param[in] oid Optionally return the DAOS Object ID of a removed obj * as a result of a rename. * @@ -561,7 +566,7 @@ dfs_get_chunk_size(dfs_obj_t *obj, daos_size_t *chunk_size); /** * Retrieve Symlink value of object if it's a symlink. If the buffer size passed * in is not large enough, we copy up to size of the buffer, and update the size - * to actual value size. + * to actual value size. The size returned includes the null terminator. 
* * \param[in] obj Open object to query. * \param[in] buf user buffer to copy the symlink value in. @@ -582,7 +587,7 @@ dfs_get_symlink_value(dfs_obj_t *obj, char *buf, daos_size_t *size); * is a local operation and doesn't change anything on the storage. * * \param[in] obj Open object handle to update. - * \param[in] parent_oid + * \param[in] parent_obj * Open object handle of new parent. * \param[in] name Optional new name of entry in parent. Pass NULL to leave * the entry name unchanged. @@ -629,9 +634,13 @@ dfs_stat(dfs_t *dfs, dfs_obj_t *parent, const char *name, DAOS_API int dfs_ostat(dfs_t *dfs, dfs_obj_t *obj, struct stat *stbuf); +/** Option to set the mode_t on an entry */ #define DFS_SET_ATTR_MODE (1 << 0) +/** Option to set the access time on an entry */ #define DFS_SET_ATTR_ATIME (1 << 1) +/** Option to set the modify time on an entry */ #define DFS_SET_ATTR_MTIME (1 << 2) +/** Option to set size of a file */ #define DFS_SET_ATTR_SIZE (1 << 3) /** diff --git a/src/include/daos_kv.h b/src/include/daos_kv.h index 66d482fe4df..48988a00305 100644 --- a/src/include/daos_kv.h +++ b/src/include/daos_kv.h @@ -36,9 +36,13 @@ extern "C" { #endif +/* Conditional Op: Insert key if it doesn't exist, fail otherwise */ #define DAOS_COND_KEY_INSERT DAOS_COND_DKEY_INSERT +/* Conditional Op: Update key if it exists, fail otherwise */ #define DAOS_COND_KEY_UPDATE DAOS_COND_DKEY_UPDATE +/* Conditional Op: Fetch key if it exists, fail otherwise */ #define DAOS_COND_KEY_FETCH DAOS_COND_DKEY_FETCH +/* Conditional Op: Punch key if it exists, fail otherwise */ #define DAOS_COND_KEY_PUNCH DAOS_COND_DKEY_PUNCH /** @@ -129,8 +133,8 @@ daos_kv_remove(daos_handle_t oh, daos_handle_t th, uint64_t flags, * nr [in]: number of key descriptors in \a kds. [out]: number * of returned key descriptors. * \param[in,out] - * kds [in]: preallocated array of \nr key descriptors. [out]: - * size of each individual key. + * kds [in]: preallocated array of \a nr key descriptors. 
+ * [out]: size of each individual key. * \param[in] sgl Scatter/gather list to store the dkey list. * All keys are written contiguously, with actual * boundaries that can be calculated using \a kds. diff --git a/src/include/daos_mgmt.h b/src/include/daos_mgmt.h index 1e10e1a1979..6ff7821bab7 100644 --- a/src/include/daos_mgmt.h +++ b/src/include/daos_mgmt.h @@ -1,5 +1,5 @@ /** - * (C) Copyright 2016-2018 Intel Corporation. + * (C) Copyright 2016-2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -365,7 +365,8 @@ enum { * \param rank [IN] Ranks to set parameter. -1 means setting on all servers. * \param key_id [IN] key ID of the parameter. * \param value [IN] value of the parameter. - * \param value [IN] optional extra value to set the fail value when + * \param value_extra [IN] + * optional extra value to set the fail value when * \a key_id is DMG_CMD_FAIL_LOC and \a value is in * DAOS_FAIL_VALUE mode. * \param ev [IN] Completion event, it is optional and can be NULL. diff --git a/src/include/daos_security.h b/src/include/daos_security.h index 161a5c79608..4315b931ac1 100644 --- a/src/include/daos_security.h +++ b/src/include/daos_security.h @@ -50,6 +50,7 @@ extern "C" { * terminator. 
*/ #define DAOS_ACL_MAX_PRINCIPAL_LEN (255) +/** DAOS_ACL_MAX_PRINCIPAL_LEN including NULL terminator */ #define DAOS_ACL_MAX_PRINCIPAL_BUF_LEN (DAOS_ACL_MAX_PRINCIPAL_LEN + 1) /** @@ -317,8 +318,6 @@ daos_acl_add_ace(struct daos_acl **acl, struct daos_ace *new_ace); * \param[in] type Principal type of the ACE to remove * \param[in] principal_name Principal name of the ACE to remove * (NULL if type isn't user/group) - * \param[out] new_acl Reallocated copy of the ACL with the - * ACE removed * * \return 0 Success * -DER_INVAL Invalid input diff --git a/src/include/daos_task.h b/src/include/daos_task.h index 3abdbca7fa3..1743b552dce 100644 --- a/src/include/daos_task.h +++ b/src/include/daos_task.h @@ -138,364 +138,621 @@ typedef enum { DAOS_OPC_MAX } daos_opc_t; +/** svc rip params */ typedef struct { + /** Process set name of the DAOS servers managing the pool */ const char *grp; + /** rank to kill */ d_rank_t rank; + /** Abrupt shutdown, no cleanup */ bool force; } daos_svc_rip_t; +/** mgmt set params */ typedef struct { + /** Process set name of the DAOS servers managing the pool */ const char *grp; + /** Ranks to set parameter. -1 means setting on all servers */ d_rank_t rank; + /** key ID of the parameter */ uint32_t key_id; + /** value of the parameter */ uint64_t value; + /** optional extra value to set the fail */ uint64_t value_extra; } daos_set_params_t; +/** pool create params */ typedef struct { + /** Capabilities permitted for the pool. */ uint32_t mode; + /** User owning the pool */ uid_t uid; + /** Group owning the pool */ gid_t gid; + /** Process set name of the DAOS servers managing the pool. */ const char *grp; + /** Optional, allocate targets on this list of ranks. */ const d_rank_list_t *tgts; + /** String identifying the target devices to use. */ const char *dev; + /** Target SCM (Storage Class Memory) size in bytes. */ daos_size_t scm_size; + /** Target NVMe (Non-Volatile Memory express) size in bytes. 
*/ daos_size_t nvme_size; + /** Optional, pool properties. */ daos_prop_t *prop; + /** Number of desired pool service replicas. */ d_rank_list_t *svc; + /** UUID of the pool created */ unsigned char *uuid; } daos_pool_create_t; +/** pool destory args */ typedef struct { + /** UUID of the pool to destroy. */ const uuid_t uuid; + /** Process set name of the DAOS servers managing the pool */ const char *grp; + /** Force destruction even if there are active connections */ int force; } daos_pool_destroy_t; +/** pool extend args */ typedef struct { + /** UUID of the pool to extend. */ const uuid_t uuid; + /** Process set name of the DAOS servers managing the pool. */ const char *grp; + /** Optional, only extend the pool to included targets. */ d_rank_list_t *tgts; + /** Optional, buffer to store faulty targets on failure. */ d_rank_list_t *failed; } daos_pool_extend_t; +/** pool evict args */ typedef struct { + /** UUID of the pool. */ const uuid_t uuid; + /** Process set name of the DAOS servers managing the pool. */ const char *grp; + /** list of pool service ranks. */ d_rank_list_t *svc; } daos_pool_evict_t; +/** pool connect args */ typedef struct { + /** UUID of the pool. */ const uuid_t uuid; + /** Process set name of the DAOS servers managing the pool. */ const char *grp; + /** Pool service replica ranks. */ const d_rank_list_t *svc; + /** Connect mode represented by the DAOS_PC_ bits. */ unsigned int flags; + /** Returned open handle. */ daos_handle_t *poh; + /** Optional, returned pool information. */ daos_pool_info_t *info; } daos_pool_connect_t; +/** poo disconnect args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; } daos_pool_disconnect_t; +/** pool target update (add/exclude) args */ typedef struct { + /** UUID of the pool. */ const uuid_t uuid; + /** Process set name of the DAOS servers managing the pool */ const char *grp; + /** Pool service replica ranks. 
*/ d_rank_list_t *svc; + /** Target array */ struct d_tgt_list *tgts; } daos_pool_update_t; +/** pool query args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Optional, returned storage targets in this pool. */ d_rank_list_t *tgts; + /** Optional, returned pool information. */ daos_pool_info_t *info; + /** Optional, returned pool properties. */ daos_prop_t *prop; } daos_pool_query_t; +/** pool target query args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Array of targets to query. */ d_rank_list_t *tgts; + /** Optional, buffer to store faulty targets on failure. */ d_rank_list_t *failed; + /** Returned storage information of targets. */ daos_target_info_t *info_list; } daos_pool_query_target_t; +/** pool container list args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** [in] length of \a cont_buf. [out] num of containers in the pool. */ daos_size_t *ncont; + /** Array of container structures. */ struct daos_pool_cont_info *cont_buf; } daos_pool_list_cont_t; +/** pool list attributes args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Buffer containing concatenation of all attribute names. */ char *buf; + /** [in]: Buffer size. [out]: Aggregate size of all attribute names */ size_t *size; } daos_pool_list_attr_t; +/** pool get attributes args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Number of attributes. */ int n; + /** Array of \a n null-terminated attribute names. */ char const *const *names; + /** Array of \a n buffers to store attribute values. */ void *const *values; + /** [in]: Array of \a n buf sizes. [out]: Array of actual sizes. */ size_t *sizes; } daos_pool_get_attr_t; +/** pool set attributes args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Number of attributes. */ int n; + /** Array of \a n null-terminated attribute names. */ char const *const *names; + /** Array of \a n attribute values. 
*/ void const *const *values; + /** Array of \a n elements containing the sizes of attribute values. */ size_t const *sizes; } daos_pool_set_attr_t; +/** pool add/remove replicas args */ typedef struct { + /** UUID of the pool. */ const uuid_t uuid; + /** Name of DAOS server process set managing the service. */ const char *group; + /** List of service ranks. */ d_rank_list_t *svc; + /** Ranks of the replicas to be added/removed. */ d_rank_list_t *targets; + /** Optional, list of ranks which could not be added/removed. */ d_rank_list_t *failed; } daos_pool_replicas_t; +/** pool management pool list args */ typedef struct { + /** Process set name of the DAOS servers managing the pool */ const char *grp; + /** Array of pool mgmt information structures. */ daos_mgmt_pool_info_t *pools; + /** length of array */ daos_size_t *npools; } daos_mgmt_list_pools_t; +/** pool service stop args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; } daos_pool_stop_svc_t; +/** Container create args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Container UUID. */ const uuid_t uuid; + /** Optional container properties. */ daos_prop_t *prop; } daos_cont_create_t; +/** Container open args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Container UUID. */ const uuid_t uuid; + /** Open mode, represented by the DAOS_COO_ bits.*/ unsigned int flags; + /** Returned container open handle. */ daos_handle_t *coh; + /** Optional, return container information. */ daos_cont_info_t *info; } daos_cont_open_t; +/** Container close args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; } daos_cont_close_t; +/** Container destory args */ typedef struct { + /** Pool open handle. */ daos_handle_t poh; + /** Container UUID. */ const uuid_t uuid; + /** Force destroy even if there is outstanding open handles. */ int force; } daos_cont_destroy_t; +/** Container query args */ typedef struct { + /** Container open handle. 
*/ daos_handle_t coh; + /** Returned container information. */ daos_cont_info_t *info; + /** Optional, returned container properties. */ daos_prop_t *prop; } daos_cont_query_t; +/** Container set properties args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Property entries to set/update. */ daos_prop_t *prop; } daos_cont_set_prop_t; +/** Container ACL update args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** ACL containing new/updated entries. */ struct daos_acl *acl; } daos_cont_update_acl_t; +/** Container ACL delete args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Principal type to be removed. */ uint8_t type; + /** Name of principal to be removed. */ d_string_t name; } daos_cont_delete_acl_t; +/** Container aggregate args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Epoch to be aggregated to. Current time if 0.*/ daos_epoch_t epoch; } daos_cont_aggregate_t; +/** Container rollback args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Epoch of a persistent snapshot to rollback to. */ daos_epoch_t epoch; } daos_cont_rollback_t; +/** Container subscribe args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /* + * [in]: epoch of snapshot to wait for. + * [out]: epoch of persistent snapshot taken. + */ daos_epoch_t *epoch; } daos_cont_subscribe_t; +/** Container attribute list args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Buffer containing concatenation of all attribute names. */ char *buf; + /** [in]: Buffer size. [out]: Aggregate size of all attribute names. */ size_t *size; } daos_cont_list_attr_t; +/** Container attribute get args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Number of attributes. */ int n; + /** Array of \a n null-terminated attribute names. 
*/ char const *const *names; + /** Array of \a n buffers to store attribute values. */ void *const *values; + /**[in]: Array of \a n buffer sizes. [out]: Array of actual sizes */ size_t *sizes; } daos_cont_get_attr_t; +/** Container attribute set args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Number of attributes. */ int n; + /** Array of \a n null-terminated attribute names. */ char const *const *names; + /** Array of \a n attribute values. */ void const *const *values; + /** Array of \a n elements containing the sizes of attribute values. */ size_t const *sizes; } daos_cont_set_attr_t; +/** Container Object ID allocation args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Number of unique IDs requested. */ daos_size_t num_oids; + /** starting oid that was allocated up to oid + num_oids. */ uint64_t *oid; } daos_cont_alloc_oids_t; +/** Container snapshot listing args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /* + * [in]: Number of snapshots in epochs and names. + * [out]: Actual number of snapshots returned + */ int *nr; + /** preallocated array of epochs to store snapshots. */ daos_epoch_t *epochs; + /** preallocated array of names of the snapshots. */ char **names; + /** Hash anchor for the next call. */ daos_anchor_t *anchor; } daos_cont_list_snap_t; +/** Container snapshot creation args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Returned epoch of persistent snapshot taken. */ daos_epoch_t *epoch; + /** Optional null terminated name for snapshot. */ char *name; } daos_cont_create_snap_t; +/** Container snapshot destroy args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Epoch range of snapshots to destroy. */ daos_epoch_range_t epr; } daos_cont_destroy_snap_t; +/** Transaction Open args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Returned transaction open handle. 
*/ daos_handle_t *th; } daos_tx_open_t; +/** Transaction commit args */ typedef struct { + /** Transaction open handle. */ daos_handle_t th; } daos_tx_commit_t; +/** Transaction abort args */ typedef struct { + /** Transaction open handle. */ daos_handle_t th; } daos_tx_abort_t; +/** Transaction snapshot open args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Epoch of persistent snapshot to read from. */ daos_epoch_t epoch; + /** Returned transaction open handle. */ daos_handle_t *th; } daos_tx_open_snap_t; +/** Transaction close args */ typedef struct { + /** Transaction open handle. */ daos_handle_t th; } daos_tx_close_t; +/** Object class register args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Object class ID. */ daos_oclass_id_t cid; + /** Object class attributes. */ struct daos_oclass_attr *cattr; } daos_obj_register_class_t; +/** Object class query args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Object class ID. */ daos_oclass_id_t cid; + /** Object class attributes. */ struct daos_oclass_attr *cattr; } daos_obj_query_class_t; +/** Object class list args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Sink buffer for returned class list. */ struct daos_oclass_list *clist; + /** Hash anchor for the next call. */ daos_anchor_t *anchor; } daos_obj_list_class_t; +/** Object open args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Object ID. */ daos_obj_id_t oid; + /** Object open mode. */ unsigned int mode; + /** Returned object handle. */ daos_handle_t *oh; } daos_obj_open_t; +/** Object close args */ typedef struct { + /** Object open handle */ daos_handle_t oh; } daos_obj_close_t; -/* NB: +/* + * Object & Object Key Punch args. + * NB: * - If @dkey is NULL, it is parameter for object punch. * - If @akeys is NULL, it is parameter for dkey punch. 
* - API allows user to punch multiple dkeys, in this case, client module needs * to allocate multiple instances of this data structure. */ typedef struct { + /** Object open handle */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Distribution Key. */ daos_key_t *dkey; + /** Array of attribute keys. */ daos_key_t *akeys; + /** Operation flags. */ uint64_t flags; + /** Number of akeys in \a akeys. */ unsigned int akey_nr; } daos_obj_punch_t; +/** Object query args */ typedef struct { + /** Object open handle */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Returned object attributes. */ struct daos_obj_attr *oa; + /** Ordered list of ranks where the object is stored. */ d_rank_list_t *ranks; } daos_obj_query_t; +/** Object key query args */ typedef struct { + /** Object open handle */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /* + * [in]: allocated integer dkey. + * [out]: max or min dkey (if flag includes dkey query). + */ daos_key_t *dkey; + /* + * [in]: allocated integer akey. + * [out]: max or min akey (if flag includes akey query). + */ daos_key_t *akey; + /** max or min offset in key, and size of the extent at the offset. */ daos_recx_t *recx; + /** Operation flags. */ uint64_t flags; } daos_obj_query_key_t; +/** Object fetch/update args */ typedef struct { + /** Object open handle */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Operation flags. */ uint64_t flags; + /** Distribution Key. */ daos_key_t *dkey; + /** Number of elements in \a iods and \a sgls. */ unsigned int nr; + /** IO descriptor describing IO layout in the object. */ daos_iod_t *iods; + /** Scatter / gather list for a memory descriptor. */ d_sg_list_t *sgls; - daos_iom_t *maps; /* only valid for fetch */ + /** IO Map - only valid for fetch. 
*/ + daos_iom_t *maps; } daos_obj_rw_t; +/** fetch args struct */ typedef daos_obj_rw_t daos_obj_fetch_t; +/** update args struct */ typedef daos_obj_rw_t daos_obj_update_t; +/** Object shard fetch args */ struct daos_obj_fetch_shard { + /** base. */ daos_obj_fetch_t base; + /** Operation flags. */ unsigned int flags; + /** shard. */ unsigned int shard; }; +/** Object sync args */ struct daos_obj_sync_args { + /** Object open handle */ daos_handle_t oh; + /** epoch. */ daos_epoch_t epoch; + /** epochp. */ daos_epoch_t **epochs_p; + /** nr. */ int *nr; }; +/** Object list args */ typedef struct { + /** Object open handle */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Distribution key. */ daos_key_t *dkey; + /** Attribute key. */ daos_key_t *akey; - uint32_t *nr; /* number of dkeys/akeys/kds entries */ + /** number of dkeys/akeys/kds entries */ + uint32_t *nr; + /** Key descriptors holding enumerated keys. */ daos_key_desc_t *kds; + /** Scatter gather list for memory buffer. */ d_sg_list_t *sgl; - daos_size_t *size; /*total buf size for sgl buf, in case - *it uses bulk transfer - */ + /** total buf size for sgl buf, in case it uses bulk transfer. */ + daos_size_t *size; + /** type of value. */ daos_iod_type_t type; + /** record extents. */ daos_recx_t *recxs; + /** epoch ranges */ daos_epoch_range_t *eprs; /* anchors for obj list - * list_dkey uses dkey_anchor, @@ -504,9 +761,13 @@ typedef struct { * list_obj uses all the 3 anchors. */ daos_anchor_t *anchor; + /** anchor for list_dkey. */ daos_anchor_t *dkey_anchor; + /** anchor for list_akey. */ daos_anchor_t *akey_anchor; + /** versions. */ uint32_t *versions; + /** order. */ bool incr_order; } daos_obj_list_t; @@ -570,85 +831,148 @@ typedef daos_obj_list_t daos_obj_list_recx_t; */ typedef daos_obj_list_t daos_obj_list_obj_t; +/** Array create args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Array ID. 
 */ daos_obj_id_t oid; + /** Transaction open handle. */ daos_handle_t th; + /** Size of array records. */ daos_size_t cell_size; + /** Number of records stored under 1 dkey. */ daos_size_t chunk_size; + /** Returned array open handle */ daos_handle_t *oh; } daos_array_create_t; +/** Array open args */ typedef struct { + /** Container open handle. */ daos_handle_t coh; + /** Array ID. */ daos_obj_id_t oid; + /** Transaction open handle. */ daos_handle_t th; + /** Open mode. */ unsigned int mode; + /** flag whether cell and chunk size are user provided. */ unsigned int open_with_attr; + /** Size of array records. */ daos_size_t *cell_size; + /** Number of records stored under 1 dkey. */ daos_size_t *chunk_size; + /** Returned Array open handle */ daos_handle_t *oh; } daos_array_open_t; +/** Array close args */ typedef struct { + /** Array open handle. */ daos_handle_t oh; } daos_array_close_t; +/** Array read/write args */ typedef struct { + /** Array open handle. */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Array IO descriptors. */ daos_array_iod_t *iod; + /** memory descriptors. */ d_sg_list_t *sgl; } daos_array_io_t; +/** Array get size args */ typedef struct { - daos_handle_t oh; + /** Array open handle. */ + daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Returned array size in number of records. */ daos_size_t *size; } daos_array_get_size_t; +/** Array set size args */ typedef struct { - daos_handle_t oh; + /** Array open handle. */ + daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** truncate size of the array. */ daos_size_t size; } daos_array_set_size_t; +/** Array destroy args */ typedef struct { + /** Array open handle. */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; } daos_array_destroy_t; +/** KV get args */ typedef struct { + /** KV open handle. */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Operation flags. 
*/ uint64_t flags; + /** Key. */ const char *key; + /** Value buffer size. */ daos_size_t *buf_size; + /** Value buffer. */ void *buf; } daos_kv_get_t; +/** KV put args */ typedef struct { + /** KV open handle. */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Operation flags. */ uint64_t flags; + /** Key. */ const char *key; + /** Value size. */ daos_size_t buf_size; + /** Value buffer. */ const void *buf; } daos_kv_put_t; +/** KV remove args */ typedef struct { + /** KV open handle. */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /** Operation flags. */ uint64_t flags; + /** Key. */ const char *key; } daos_kv_remove_t; +/** KV list args */ typedef struct { + /** KV open handle. */ daos_handle_t oh; + /** Transaction open handle. */ daos_handle_t th; + /* + * [in]: number of key descriptors in \a kds. + * [out]: number of returned key descriptors. + */ uint32_t *nr; + /** key descriptors. */ daos_key_desc_t *kds; + /** memory descriptors. */ d_sg_list_t *sgl; + /** Hash anchor for the next call. */ daos_anchor_t *anchor; } daos_kv_list_t; @@ -672,8 +996,9 @@ typedef struct { * before it's scheduled. * \param taskp [OUT] Pointer to task to be created/initalized with the op. * - * \return 0 if task creation succeeds. - * negative errno if it fails. + * \return 0 Success + * -DER_INVAL Invalid parameter + * -DER_NOSYS Unsupported opc */ DAOS_API int daos_task_create(daos_opc_t opc, tse_sched_t *sched, @@ -726,7 +1051,7 @@ daos_task_set_priv(tse_task_t *task, void *priv); * \param is_empty [OUT] * flag to indicate whether the scheduler is empty or not. * - * \return 0 if Success, errno if failed. + * \return 0 if Success, negative DER if failed. 
 */ DAOS_API int daos_progress(tse_sched_t *sched, int64_t timeout, bool *is_empty); diff --git a/src/include/daos_types.h b/src/include/daos_types.h index 46b42002db2..f15f8f0317a 100644 --- a/src/include/daos_types.h +++ b/src/include/daos_types.h @@ -210,7 +210,9 @@ typedef enum { * index within the rank */ struct d_tgt_list { + /** array of ranks */ d_rank_t *tl_ranks; + /** array of targets */ int32_t *tl_tgts; /** number of ranks & tgts */ uint32_t tl_nr; diff --git a/src/include/daos_uns.h b/src/include/daos_uns.h index c6acf69c16d..6134caaf23c 100644 --- a/src/include/daos_uns.h +++ b/src/include/daos_uns.h @@ -1,5 +1,5 @@ /* - * (C) Copyright 2019 Intel Corporation. + * (C) Copyright 2019 - 2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ extern "C" { #endif +/** struct that has the values to make the connection from the UNS to DAOS */ struct duns_attr_t { /** Pool uuid of the container. */ uuid_t da_puuid; @@ -54,7 +55,9 @@ struct duns_attr_t { bool da_on_lustre; }; +/** extended attribute name that will contain the UNS info */ #define DUNS_XATTR_NAME "user.daos" +/** Length of the extended attribute */ #define DUNS_MAX_XATTR_LEN 170 /** @@ -67,8 +70,8 @@ struct duns_attr_t { * * \param[in] poh Pool handle * \param[in] path Valid path in an existing namespace. - * \param[in/out] - * attr Struct containing the attributes. The uuid of the + * \param[in,out] + * attrp Struct containing the attributes. The uuid of the * container created is returned in da_cuuid. * * \return 0 on Success. Negative on Failure. 
diff --git a/src/iosrv/module.c b/src/iosrv/module.c index 9d0503fe08a..2cd0dffba74 100644 --- a/src/iosrv/module.c +++ b/src/iosrv/module.c @@ -273,20 +273,28 @@ dss_module_cleanup_all(void) struct loaded_mod *mod; int rc = 0; + D_INFO("Cleaning up all loaded modules\n"); D_MUTEX_LOCK(&loaded_mod_list_lock); + D_INFO("Iterating through loaded modules list\n"); d_list_for_each_entry_reverse(mod, &loaded_mod_list, lm_lk) { struct dss_module *m = mod->lm_dss_mod; - if (m->sm_cleanup == NULL) + if (m->sm_cleanup == NULL) { + D_INFO("Module %s: no sm_cleanup func\n", m->sm_name); continue; + } + D_INFO("Module %s: invoke sm_cleanup func\n", m->sm_name); rc = m->sm_cleanup(); if (rc != 0) { D_ERROR("failed to clean up module %s: "DF_RC"\n", m->sm_name, DP_RC(rc)); break; } + D_INFO("Module %s: cleaned up\n", m->sm_name); } + D_INFO("Done iterating through loaded modules list\n"); D_MUTEX_UNLOCK(&loaded_mod_list_lock); + D_INFO("Done cleaning up all loaded modules\n"); return rc; } diff --git a/src/mgmt/cli_mgmt.c b/src/mgmt/cli_mgmt.c index b1e2d6d5257..a8161f032ff 100644 --- a/src/mgmt/cli_mgmt.c +++ b/src/mgmt/cli_mgmt.c @@ -269,6 +269,18 @@ struct dc_mgmt_psr { char *uri; }; +#define copy_str(dest, src) \ +({ \ + int __rc = 1; \ + size_t __size = strnlen(src, sizeof(dest)); \ + \ + if (__size != sizeof(dest)) { \ + memcpy(dest, src, __size + 1); \ + __rc = 0; \ + } \ + __rc; \ +}) + /* * Get the attach info (i.e., the CaRT PSRs) for name. npsrs outputs the number * of elements in psrs. psrs outputs the array of struct dc_mgmt_psr objects. 
@@ -288,7 +300,6 @@ get_attach_info(const char *name, int *npsrs, struct dc_mgmt_psr **psrs, struct dc_mgmt_psr *p; int i; int rc; - int size; D_DEBUG(DB_MGMT, "getting attach info for %s\n", name); @@ -370,26 +381,20 @@ get_attach_info(const char *name, int *npsrs, struct dc_mgmt_psr **psrs, *psrs = p; if (sy_info) { - size = sizeof(sy_info->provider); - if (strnlen(resp->provider, size) == size) { + if (copy_str(sy_info->provider, resp->provider)) { D_ERROR("GetAttachInfo provider string too long\n"); D_GOTO(out_resp, rc = -DER_INVAL); } - strncpy(sy_info->provider, resp->provider, size); - size = sizeof(sy_info->interface); - if (strnlen(resp->interface, size) == size) { + if (copy_str(sy_info->interface, resp->interface)) { D_ERROR("GetAttachInfo interface string too long\n"); D_GOTO(out_resp, rc = -DER_INVAL); } - strncpy(sy_info->interface, resp->interface, size); - size = sizeof(sy_info->domain); - if (strnlen(resp->domain, size) == size) { + if (copy_str(sy_info->domain, resp->domain)) { D_ERROR("GetAttachInfo domain string too long\n"); D_GOTO(out_resp, rc = -DER_INVAL); } - strncpy(sy_info->domain, resp->domain, size); sy_info->crt_ctx_share_addr = resp->crtctxshareaddr; sy_info->crt_timeout = resp->crttimeout; diff --git a/src/object/srv_obj.c b/src/object/srv_obj.c index f3060ba6df3..00df3152651 100644 --- a/src/object/srv_obj.c +++ b/src/object/srv_obj.c @@ -2230,7 +2230,7 @@ ds_obj_sync_handler(crt_rpc_t *rpc) else oso->oso_epoch = min(epoch, osi->osi_epoch); - D_DEBUG(DB_IO, "start: "DF_UOID", epc "DF_U64"\n", + D_DEBUG(DB_IO, "obj_sync start: "DF_UOID", epc "DF_U64"\n", DP_UOID(osi->osi_oid), oso->oso_epoch); rc = obj_ioc_begin(osi->osi_oid, osi->osi_map_ver, @@ -2247,7 +2247,7 @@ ds_obj_sync_handler(crt_rpc_t *rpc) obj_reply_set_status(rpc, rc); obj_ioc_end(&ioc, rc); - D_DEBUG(DB_IO, "stop: "DF_UOID", epc "DF_U64", rd = %d\n", + D_DEBUG(DB_IO, "obj_sync stop: "DF_UOID", epc "DF_U64", rd = %d\n", DP_UOID(osi->osi_oid), oso->oso_epoch, rc); rc = 
crt_reply_send(rpc); diff --git a/src/tests/daos_racer.c b/src/tests/daos_racer.c index a34e3006715..8b0ed10dccb 100644 --- a/src/tests/daos_racer.c +++ b/src/tests/daos_racer.c @@ -1,5 +1,5 @@ /** - * (C) Copyright 2019 Intel Corporation. + * (C) Copyright 2019-2020 Intel Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -527,6 +527,22 @@ main(int argc, char **argv) continue; } + if (rc == -DER_NOSPACE) { + /* XXX: There is not enough space to sync the + * object, that may cause some committable + * DTX entries cannot be committed on some + * replica(s), then subsequent fetch from + * related replica(s) for verification + * against those DTX entries will not get + * the right data as to the verification + * logic may report fake inconsistency. + * + * So let's stop the verification. + */ + rc = 0; + break; + } + if (rc == -DER_MISMATCH) { fprintf(stderr, "Found inconsistency for obj " DF_OID"\n", DP_OID(oid)); diff --git a/src/tests/ftest/io/llnl_mpi4py.yaml b/src/tests/ftest/io/llnl_mpi4py.yaml index a39bf0c213d..fb9085ae28c 100644 --- a/src/tests/ftest/io/llnl_mpi4py.yaml +++ b/src/tests/ftest/io/llnl_mpi4py.yaml @@ -15,6 +15,8 @@ pool: scm_size: 1000000000 svcn: 1 control_method: dmg +container: + type: POSIX client_processes: np: 8 test_repo: diff --git a/src/tests/ftest/util/dfuse_utils.py b/src/tests/ftest/util/dfuse_utils.py index f99e802ea13..ee8b3ad0bbb 100644 --- a/src/tests/ftest/util/dfuse_utils.py +++ b/src/tests/ftest/util/dfuse_utils.py @@ -94,11 +94,11 @@ def __init__(self, hosts, tmp, dfuse_env=False, log_file=None): self.tmp = tmp self.dfuse_env = dfuse_env self.log_file = log_file + self.running_hosts = NodeSet() def __del__(self): - """Destroy Dfuse object and stop dfuse """ - # stop dfuse - self.stop() + if len(self.running_hosts): + self.log.error('Dfuse object deleted without shutting down') def create_mount_point(self): """Create dfuse 
directory @@ -110,11 +110,12 @@ def create_mount_point(self): raise CommandFailure("Mount point not specified, " "check test yaml file") - dir_exists, _ = general_utils.check_file_exists( + _, missing_nodes = general_utils.check_file_exists( self.hosts, self.mount_dir.value, directory=True) - if not dir_exists: + if len(missing_nodes): + cmd = "mkdir -p {}".format(self.mount_dir.value) - ret_code = general_utils.pcmd(self.hosts, cmd, timeout=30) + ret_code = general_utils.pcmd(missing_nodes, cmd, timeout=30) if len(ret_code) > 1 or 0 not in ret_code: error_hosts = NodeSet( ",".join( @@ -192,8 +193,12 @@ def run(self): # run dfuse command ret_code = general_utils.pcmd(self.hosts, env + self.__str__(), timeout=30) - # check for any failures - if len(ret_code) > 1 or 0 not in ret_code: + + if 0 in ret_code: + self.running_hosts.add(ret_code[0]) + del ret_code[0] + + if len(ret_code): error_hosts = NodeSet( ",".join( [str(node_set) for code, node_set in ret_code.items() @@ -202,6 +207,31 @@ def run(self): "Error starting dfuse on the following hosts: {}".format( error_hosts)) + if not self.check_running(fail_on_error=False): + self.log.info('Waiting five seconds for dfuse to start') + time.sleep(5) + self.check_running() + + def check_running(self, fail_on_error=True): + """Check dfuse is running + + Run a command to verify dfuse is running on hosts where it is supposed + to be. Use grep -v and rc=1 here so that if it isn't, then we can + see what is being used instead. + """ + retcodes = general_utils.pcmd(self.running_hosts, + "stat -c %T -f {0} | grep -v fuseblk".\ + format(self.mount_dir.value), + expect_rc=1) + if 1 in retcodes: + del retcodes[1] + if len(retcodes): + self.log.error('Errors checking running: %s', retcodes) + if not fail_on_error: + return False + raise CommandFailure('dfuse not running') + return True + def stop(self): """Stop dfuse Raises: @@ -214,26 +244,34 @@ def stop(self): Finally, try and remove the mount point, and that itself should work. 
""" - self.log.info('Stopping dfuse at %s', self.mount_dir.value) + self.log.info('Stopping dfuse at %s on %s', + self.mount_dir.value, + self.running_hosts) if self.mount_dir.value is None: return + + if not len(self.running_hosts): + return + + self.check_running() umount_cmd = "if [ -x '$(command -v fusermount)' ]; " umount_cmd += "then fusermount -u {0}; else fusermount3 -u {0}; fi".\ format(self.mount_dir.value) - ret_code = general_utils.pcmd(self.hosts, umount_cmd, timeout=30) - if len(ret_code) > 1 or 0 not in ret_code: - error_hosts = NodeSet( - ",".join( - [str(node_set) for code, node_set in ret_code.items() - if code != 0])) + ret_code = general_utils.pcmd(self.running_hosts, umount_cmd, timeout=30) + + if 0 in ret_code: + self.running_hosts.remove(ret_code[0]) + del ret_code[0] + + if len(self.running_hosts): cmd = "pkill dfuse --signal KILL" - general_utils.pcmd(error_hosts, cmd, timeout=30) - general_utils.pcmd(error_hosts, umount_cmd, timeout=30) + general_utils.pcmd(self.running_hosts, cmd, timeout=30) + general_utils.pcmd(self.running_hosts, umount_cmd, timeout=30) self.remove_mount_point(fail=False) raise CommandFailure( "Error stopping dfuse on the following hosts: {}".format( - error_hosts)) + self.running_hosts)) time.sleep(2) self.remove_mount_point() diff --git a/src/tests/ftest/util/fio_test_base.py b/src/tests/ftest/util/fio_test_base.py index 3768c1ec2c0..84f872d8fec 100755 --- a/src/tests/ftest/util/fio_test_base.py +++ b/src/tests/ftest/util/fio_test_base.py @@ -69,7 +69,8 @@ def setUp(self): def tearDown(self): """Tear down each test case.""" try: - self.dfuse = None + if self.dfuse: + self.dfuse.stop() finally: # Stop the servers and agents super(FioBase, self).tearDown() @@ -147,3 +148,7 @@ def execute_fio(self): # Run Fio self.fio_cmd.hosts = self.hostlist_clients self.fio_cmd.run() + + if self.dfuse: + self.dfuse.stop() + self.dfuse = None diff --git a/src/tests/ftest/util/ior_test_base.py b/src/tests/ftest/util/ior_test_base.py 
index 5e63ab51a0a..2a2587486dd 100644 --- a/src/tests/ftest/util/ior_test_base.py +++ b/src/tests/ftest/util/ior_test_base.py @@ -80,7 +80,8 @@ def setUp(self): def tearDown(self): """Tear down each test case.""" try: - self.dfuse = None + if self.dfuse: + self.dfuse.stop() finally: # Stop the servers and agents super(IorTestBase, self).tearDown() @@ -104,7 +105,7 @@ def create_cont(self): # create container self.container.create(con_in=self.co_prop) - def start_dfuse(self): + def _start_dfuse(self): """Create a DfuseCommand object to start dfuse.""" # Get Dfuse params self.dfuse = Dfuse(self.hostlist_clients, self.tmp, @@ -145,7 +146,7 @@ def run_ior_with_pool(self, intercept=None, test_file_suffix=""): # Uncomment below two lines once DAOS-3355 is resolved if self.ior_cmd.transfer_size.value == "256B": return "Skipping the case for transfer_size=256B" - self.start_dfuse() + self._start_dfuse() testfile = os.path.join(self.dfuse.mount_dir.value, "testfile{}".format(test_file_suffix)) @@ -154,6 +155,9 @@ def run_ior_with_pool(self, intercept=None, test_file_suffix=""): out = self.run_ior(self.get_job_manager_command(), self.processes, intercept) + if self.dfuse: + self.dfuse.stop() + self.dfuse = None return out def update_ior_cmd_with_pool(self): @@ -228,7 +232,7 @@ def run_multiple_ior_with_pool(self, results, intercept=None): # start dfuse for POSIX api. This is specific to interception # library test requirements. - self.start_dfuse() + self._start_dfuse() # Create two jobs and run in parallel. 
# Job1 will have 3 client set up to use dfuse + interception @@ -247,6 +251,8 @@ def run_multiple_ior_with_pool(self, results, intercept=None): job2.start() job1.join() job2.join() + self.dfuse.stop() + self.dfuse = None def get_new_job(self, clients, job_num, results, intercept=None): """Create a new thread for ior run diff --git a/src/tests/ftest/util/mdtest_test_base.py b/src/tests/ftest/util/mdtest_test_base.py index 28873b72c9f..e54aa08fd34 100755 --- a/src/tests/ftest/util/mdtest_test_base.py +++ b/src/tests/ftest/util/mdtest_test_base.py @@ -83,7 +83,8 @@ def setUp(self): def tearDown(self): """Tear down each test case.""" try: - self.dfuse = None + if self.dfuse: + self.dfuse.stop() finally: # Stop the servers and agents super(MdtestBase, self).tearDown() @@ -121,6 +122,7 @@ def _create_cont(self): def _start_dfuse(self): """Create a DfuseCommand object to start dfuse.""" # Get Dfuse params + self.dfuse = Dfuse(self.hostlist_clients, self.tmp, log_file=get_log_file(self.client_log), @@ -136,11 +138,10 @@ def _start_dfuse(self): self.dfuse.run() except CommandFailure as error: self.log.error("Dfuse command %s failed on hosts %s", - str(self.dfuse), str(NodeSet(self.dfuse.hosts)), + str(self.dfuse), self.dfuse.hosts, exc_info=error) self.fail("Unable to launch Dfuse.\n") - def execute_mdtest(self): """Runner method for Mdtest.""" @@ -162,6 +163,9 @@ def execute_mdtest(self): # Run Mdtest self.run_mdtest(self.get_job_manager_command(self.manager), self.processes) + if self.dfuse: + self.dfuse.stop() + self.dfuse = None def get_job_manager_command(self, manager): """Get the MPI job manager command for Mdtest. 
 diff --git a/src/tests/ftest/util/mpio_test_base.py b/src/tests/ftest/util/mpio_test_base.py index dd440a622e6..7359c900c05 100755 --- a/src/tests/ftest/util/mpio_test_base.py +++ b/src/tests/ftest/util/mpio_test_base.py @@ -24,10 +24,12 @@ from __future__ import print_function import os +import re from apricot import TestWithServers from mpio_utils import MpioUtils, MpioFailed from test_utils_pool import TestPool +from daos_utils import DaosCommand class LlnlMpi4pyHdf5(TestWithServers): @@ -41,16 +43,46 @@ def __init__(self, *args, **kwargs): super(LlnlMpi4pyHdf5, self).__init__(*args, **kwargs) self.hostfile_clients_slots = None self.mpio = None + self.daos_cmd = None + self.cont_uuid = None def setUp(self): super(LlnlMpi4pyHdf5, self).setUp() + # initialise daos_cmd + self.daos_cmd = DaosCommand(self.bin) + # initialize a python pool object then create the underlying self.pool = TestPool( self.context, dmg_command=self.get_dmg_command()) self.pool.get_params(self) self.pool.create() + def _create_cont(self, daos_cmd): + """Create a container. 
 + + Args: + daos_cmd (DaosCommand): daos command to issue the container + create + + Returns: + str: UUID of the created container + + """ + cont_type = self.params.get("type", "/run/container/*") + result = self.daos_cmd.container_create( + pool=self.pool.uuid, svc=self.pool.svc_ranks, + cont_type=cont_type) + + # Extract the container UUID from the daos container create output + cont_uuid = re.findall( + r"created\s+container\s+([0-9a-f-]+)", result.stdout) + if not cont_uuid: + self.fail( + "Error obtaining the container uuid from: {}".format( + result.stdout)) + self.cont_uuid = cont_uuid[0] + def run_test(self, test_repo, test_name): """ Executable function to be used by test functions below @@ -65,11 +97,15 @@ def run_test(self, test_repo, test_name): # initialise test specific variables client_processes = self.params.get("np", '/run/client_processes/') + # create container for mpi4py + if test_name == "mpi4py": + self._create_cont(self.daos_cmd) + try: # running tests self.mpio.run_llnl_mpi4py_hdf5( self.hostfile_clients, self.pool.uuid, self.pool.svc_ranks, - test_repo, test_name, client_processes, self.cont_uuid) except MpioFailed as excep: self.fail("<{0} Test Failed> \n{1}".format(test_name, excep)) diff --git a/src/tests/ftest/util/mpio_utils.py b/src/tests/ftest/util/mpio_utils.py index ddc3d4748c2..0be946b12aa 100644 --- a/src/tests/ftest/util/mpio_utils.py +++ b/src/tests/ftest/util/mpio_utils.py @@ -95,7 +95,7 @@ def run_romio(hostlist, romio_test_repo): .format(str(excep))) # pylint: disable=R0913 def run_llnl_mpi4py_hdf5(self, hostfile, pool_uuid, svcl, test_repo, - test_name, client_processes): + test_name, client_processes, cont_uuid): """ Running LLNL, MPI4PY and HDF5 testsuites Function Arguments: @@ -126,19 +126,7 @@ def run_llnl_mpi4py_hdf5(self, hostfile, pool_uuid, svcl, test_repo, - 
cmd = "daos cont create --pool={} --svc={} --type=POSIX".format( - pool_uuid, 0) - try: - container = subprocess.Popen(cmd, stdout=subprocess.PIPE, - shell=True) - (output, err) = container.communicate() - - print("Container created: {}".format(output.split()[3])) - env["DAOS_CONT"] = "{}".format(output.split()[3]) - - except subprocess.CalledProcessError as err: - raise MpioFailed(" \nException occurred: {}" - .format(err)) + env["DAOS_CONT"] = "{}".format(cont_uuid) test_cmd = [env.get_export_str(), mpirun, diff --git a/src/tests/ftest/util/test_utils.py b/src/tests/ftest/util/test_utils.py deleted file mode 100644 index 30348b66124..00000000000 --- a/src/tests/ftest/util/test_utils.py +++ /dev/null @@ -1,1228 +0,0 @@ -#!/usr/bin/python -""" - (C) Copyright 2018-2019 Intel Corporation. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE - The Government's rights to use, modify, reproduce, release, perform, display, - or disclose this software are subject to the terms of the Apache License as - provided in Contract No. B609815. - Any reproduction of computer software, computer software documentation, or - portions thereof marked with this legend must also reproduce the markings. 
-""" -from logging import getLogger -import os -from time import sleep, time - -from avocado import fail_on -from avocado.utils import process -from command_utils import BasicParameter, ObjectWithParameters -from pydaos.raw import (DaosApiError, DaosServer, DaosContainer, DaosPool, - c_uuid_to_str) -from general_utils import check_pool_files, get_random_string, DaosTestError -from env_modules import load_mpi - - -class CallbackHandler(object): - """Defines a callback method to use with DaosApi class methods.""" - - def __init__(self, delay=1): - """Create a CallbackHandler object. - - Args: - delay (int, optional): number of seconds to wait in between - checking if the callback() method has been called. - Defaults to 1. - """ - self.delay = delay - self.ret_code = None - self.obj = None - self._called = False - self.log = getLogger(__name__) - - def callback(self, event): - """Return an event from a DaosApi class method. - - Args: - event (CallbackEvent): event returned by the DaosApi class method - """ - # Get the return code and calling object from the event - self.ret_code = event.event.ev_error - self.obj = event.obj - - # Indicate that this method has being called - self._called = True - - def wait(self): - """Wait for this object's callback() method to be called.""" - # Reset the event return code and calling object - self.ret_code = None - self.obj = None - - # Wait for the callback() method to be called - while not self._called: - self.log.info(" Waiting ...") - sleep(self.delay) - - # Reset the flag indicating that the callback() method was called - self._called = False - - -class TestDaosApiBase(ObjectWithParameters): - # pylint: disable=too-few-public-methods - """A base class for functional testing of DaosPools objects.""" - - def __init__(self, namespace, cb_handler=None): - """Create a TestDaosApi object. 
- - Args: - namespace (str): yaml namespace (path to parameters) - cb_handler (CallbackHandler, optional): callback object to use with - the API methods. Defaults to None. - """ - super(TestDaosApiBase, self).__init__(namespace) - self.cb_handler = cb_handler - self.log = getLogger(__name__) - - def _call_method(self, method, kwargs): - """Call the DAOS API class method with the optional callback method. - - Args: - method (object): method to call - kwargs (dict): keyworded arguments for the method - """ - if self.cb_handler: - kwargs["cb_func"] = self.cb_handler.callback - - try: - method(**kwargs) - except DaosApiError as error: - # Log the exception to obtain additional trace information - self.log.debug( - "Exception raised by %s.%s(%s)", - method.__module__, method.__name__, - ", ".join( - ["{}={}".format(key, val) for key, val in kwargs.items()]), - exc_info=error) - # Raise the exception so it can be handled by the caller - raise error - - if self.cb_handler: - # Wait for the call back if one is provided - self.cb_handler.wait() - - def _check_info(self, check_list): - """Verify each info attribute value matches an expected value. - - Args: - check_list (list): a list of tuples containing the name of the - information attribute to check, the current value of the - attribute, and the expected value of the attribute. If the - expected value is specified as a string with a number preceeded - by '<', '<=', '>', or '>=' then this comparision will be used - instead of the defult '=='. - - Returns: - bool: True if at least one check has been specified and all the - actual and expected values match; False otherwise. 
- - """ - check_status = len(check_list) > 0 - for check, actual, expect in check_list: - # Determine which comparision to utilize for this check - compare = ("==", lambda x, y: x == y, "does not match") - if isinstance(expect, str): - comparisions = { - "<": (lambda x, y: x < y, "is too large"), - ">": (lambda x, y: x > y, "is too small"), - "<=": ( - lambda x, y: x <= y, "is too large or does not match"), - ">=": ( - lambda x, y: x >= y, "is too small or does not match"), - } - for key, val in comparisions.items(): - # If the expected value is preceeded by one of the known - # comparision keys, use the comparision and remove the key - # from the expected value - if expect[:len(key)] == key: - compare = (key, val[0], val[1]) - expect = expect[len(key):] - try: - expect = int(expect) - except ValueError: - # Allow strings to be strings - pass - break - self.log.info( - "Verifying the %s %s: %s %s %s", - self.__class__.__name__.replace("Test", "").lower(), - check, actual, compare[0], expect) - if not compare[1](actual, expect): - msg = " The {} {}: actual={}, expected={}".format( - check, compare[2], actual, expect) - self.log.error(msg) - check_status = False - return check_status - - -class TestPool(TestDaosApiBase): - """A class for functional testing of DaosPools objects.""" - - def __init__(self, context, log=None, cb_handler=None): - # pylint: disable=unused-argument - """Initialize a TestPool object. - - Note: 'log' is now a defunct argument and will be removed in the future - - Args: - context (DaosContext): [description] - log (logging): logging object used to report the pool status - cb_handler (CallbackHandler, optional): callback object to use with - the API methods. Defaults to None. 
- """ - super(TestPool, self).__init__("/run/pool/*", cb_handler) - self.context = context - self.uid = os.geteuid() - self.gid = os.getegid() - - self.mode = BasicParameter(None) - self.name = BasicParameter(None) # server group name - self.svcn = BasicParameter(None) - self.target_list = BasicParameter(None) - self.scm_size = BasicParameter(None) - self.nvme_size = BasicParameter(None) - - self.pool = None - self.uuid = None - self.info = None - self.svc_ranks = None - self.connected = False - - @fail_on(DaosApiError) - def create(self): - """Create a pool. - - Destroys an existing pool if defined and assigns self.pool and - self.uuid. - """ - self.destroy() - if self.target_list.value is not None: - self.log.info( - "Creating a pool on targets %s", self.target_list.value) - else: - self.log.info("Creating a pool") - self.pool = DaosPool(self.context) - kwargs = { - "mode": self.mode.value, "uid": self.uid, "gid": self.gid, - "scm_size": self.scm_size.value, "group": self.name.value} - for key in ("target_list", "svcn", "nvme_size"): - value = getattr(self, key).value - if value: - kwargs[key] = value - self._call_method(self.pool.create, kwargs) - self.uuid = self.pool.get_uuid_str() - self.svc_ranks = [ - int(self.pool.svc.rl_ranks[index]) - for index in range(self.pool.svc.rl_nr)] - self.log.info( - " Pool created with uuid %s and svc ranks %s", - self.uuid, self.svc_ranks) - - @fail_on(DaosApiError) - def connect(self, permission=1): - """Connect to the pool. - - Args: - permission (int, optional): connect permission. Defaults to 1. - - Returns: - bool: True if the pool has been connected; False if the pool was - already connected or the pool is not defined. 
- - """ - if self.pool and not self.connected: - kwargs = {"flags": 1 << permission} - self.log.info( - "Connecting to pool %s with permission %s (flag: %s)", - self.uuid, permission, kwargs["flags"]) - self._call_method(self.pool.connect, kwargs) - self.connected = True - return True - return False - - @fail_on(DaosApiError) - def disconnect(self): - """Disconnect from connected pool. - - Returns: - bool: True if the pool has been disconnected; False if the pool was - already disconnected or the pool is not defined. - - """ - if self.pool and self.connected: - self.log.info("Disonnecting from pool %s", self.uuid) - self._call_method(self.pool.disconnect, {}) - self.connected = False - return True - return False - - @fail_on(DaosApiError) - def destroy(self, force=1): - """Destroy the pool. - - Args: - force (int, optional): force flag. Defaults to 1. - - Returns: - bool: True if the pool has been destroyed; False if the pool is not - defined. - - """ - if self.pool: - self.disconnect() - self.log.info("Destroying pool %s", self.uuid) - if self.pool.attached: - self._call_method(self.pool.destroy, {"force": force}) - self.pool = None - self.uuid = None - self.info = None - self.svc_ranks = None - return True - return False - - @fail_on(DaosApiError) - def get_info(self): - """Query the pool for information. - - Sets the self.info attribute. - """ - if self.pool: - self.connect() - self._call_method(self.pool.pool_query, {}) - self.info = self.pool.pool_info - - def check_pool_info(self, pi_uuid=None, pi_ntargets=None, pi_nnodes=None, - pi_ndisabled=None, pi_map_ver=None, pi_leader=None, - pi_bits=None): - # pylint: disable=unused-argument - """Check the pool info attributes. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Args: - pi_uuid (str, optional): pool uuid. Defaults to None. - pi_ntargets (int, optional): number of targets. Defaults to None. 
- pi_nnodes (int, optional): number of nodes. Defaults to None. - pi_ndisabled (int, optional): number of disabled. Defaults to None. - pi_map_ver (int, optional): pool map version. Defaults to None. - pi_leader (int, optional): pool leader. Defaults to None. - pi_bits (int, optional): pool bits. Defaults to None. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Returns: - bool: True if at least one expected value is specified and all the - specified values match; False otherwise - - """ - self.get_info() - checks = [ - (key, - c_uuid_to_str(getattr(self.info, key)) - if key == "pi_uuid" else getattr(self.info, key), - val) - for key, val in locals().items() - if key != "self" and val is not None] - return self._check_info(checks) - - def check_pool_space(self, ps_free_min=None, ps_free_max=None, - ps_free_mean=None, ps_ntargets=None, ps_padding=None): - # pylint: disable=unused-argument - """Check the pool info space attributes. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Args: - ps_free_min (list, optional): minimum free space per device. - Defaults to None. - ps_free_max (list, optional): maximum free space per device. - Defaults to None. - ps_free_mean (list, optional): mean free space per device. - Defaults to None. - ps_ntargets (int, optional): number of targets. Defaults to None. - ps_padding (int, optional): space padding. Defaults to None. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. 
- - Returns: - bool: True if at least one expected value is specified and all the - specified values match; False otherwise - - """ - self.get_info() - checks = [] - for key in ("ps_free_min", "ps_free_max", "ps_free_mean"): - val = locals()[key] - if isinstance(val, list): - for index, item in val: - checks.append(( - "{}[{}]".format(key, index), - getattr(self.info.pi_space, key)[index], - item)) - for key in ("ps_ntargets", "ps_padding"): - val = locals()[key] - if val is not None: - checks.append(key, getattr(self.info.pi_space, key), val) - return self._check_info(checks) - - def check_pool_daos_space(self, s_total=None, s_free=None): - # pylint: disable=unused-argument - """Check the pool info daos space attributes. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Args: - s_total (list, optional): total space per device. Defaults to None. - s_free (list, optional): free space per device. Defaults to None. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Returns: - bool: True if at least one expected value is specified and all the - specified values match; False otherwise - - """ - self.get_info() - checks = [ - ("{}_{}".format(key, index), - getattr(self.info.pi_space.ps_space, key)[index], - item) - for key, val in locals().items() - if key != "self" and val is not None - for index, item in enumerate(val)] - return self._check_info(checks) - - def check_rebuild_status(self, rs_version=None, rs_seconds=None, - rs_errno=None, rs_done=None, rs_padding32=None, - rs_fail_rank=None, rs_toberb_obj_nr=None, - rs_obj_nr=None, rs_rec_nr=None, rs_size=None): - # pylint: disable=unused-argument - # pylint: disable=too-many-arguments - """Check the pool info rebuild attributes. 
- - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Args: - rs_version (int, optional): rebuild version. Defaults to None. - rs_seconds (int, optional): rebuild seconds. Defaults to None. - rs_errno (int, optional): rebuild error number. Defaults to None. - rs_done (int, optional): rebuild done flag. Defaults to None. - rs_padding32 (int, optional): padding. Defaults to None. - rs_fail_rank (int, optional): rebuild fail target. Defaults to None. - rs_toberb_obj_nr (int, optional): number of objects to be rebuilt. - Defaults to None. - rs_obj_nr (int, optional): number of rebuilt objects. - Defaults to None. - rs_rec_nr (int, optional): number of rebuilt records. - Defaults to None. - rs_size (int, optional): size of all rebuilt records. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Returns: - bool: True if at least one expected value is specified and all the - specified values match; False otherwise - - """ - self.get_info() - checks = [ - (key, getattr(self.info.pi_rebuild_st, key), val) - for key, val in locals().items() - if key != "self" and val is not None] - return self._check_info(checks) - - def rebuild_complete(self): - """Determine if the pool rebuild is complete. - - Returns: - bool: True if pool rebuild is complete; False otherwise - - """ - self.display_pool_rebuild_status() - return self.info.pi_rebuild_st.rs_done == 1 - - def wait_for_rebuild(self, to_start, interval=1): - """Wait for the rebuild to start or end. 
- - Args: - to_start (bool): whether to wait for rebuild to start or end - interval (int): number of seconds to wait in between rebuild - completion checks - """ - self.log.info( - "Waiting for rebuild to %s ...", - "start" if to_start else "complete") - while self.rebuild_complete() == to_start: - self.log.info( - " Rebuild %s ...", - "has not yet started" if to_start else "in progress") - sleep(interval) - self.log.info( - "Rebuild %s detected", "start" if to_start else "completion") - - @fail_on(DaosApiError) - def start_rebuild(self, ranks, daos_log): - """Kill the specific server ranks using this pool. - - Args: - ranks (list): a list of daos server ranks (int) to kill - daos_log (DaosLog): object for logging messages - - Returns: - bool: True if the server ranks have been killed and the ranks have - been excluded from the pool; False if the pool is undefined - - """ - msg = "Killing DAOS ranks {} from server group {}".format( - ranks, self.name.value) - self.log.info(msg) - daos_log.info(msg) - for rank in ranks: - server = DaosServer(self.context, self.name.value, rank) - self._call_method(server.kill, {"force": 1}) - return self.exclude(ranks, daos_log) - - @fail_on(DaosApiError) - def exclude(self, ranks, daos_log): - """Manually exclude a rank from this pool. - - Args: - ranks (list): a list daos server ranks (int) to exclude - daos_log (DaosLog): object for logging messages - - Returns: - bool: True if the ranks were excluded from the pool; False if the - pool is undefined - - """ - if self.pool: - msg = "Excluding server ranks {} from pool {}".format( - ranks, self.uuid) - self.log.info(msg) - daos_log.info(msg) - self._call_method(self.pool.exclude, {"rank_list": ranks}) - return True - return False - - def check_files(self, hosts): - """Check if pool files exist on the specified list of hosts. 
- - Args: - hosts (list): list of hosts - - Returns: - bool: True if the files for this pool exist on each host; False - otherwise - - """ - return check_pool_files(self.log, hosts, self.uuid.lower()) - - def write_file(self, orterun, processes, hostfile, size, timeout=60): - """Write a file to the pool. - - Args: - orterun (str): full path to the orterun command - processes (int): number of processes to launch - hosts (list): list of clients from which to write the file - size (int): size of the file to create in bytes - timeout (int, optional): number of seconds before timing out the - command. Defaults to 60 seconds. - - Returns: - process.CmdResult: command execution result - - """ - self.log.info("Writing %s bytes to pool %s", size, self.uuid) - env = { - "DAOS_POOL": self.uuid, - "DAOS_SVCL": "1", - "PYTHONPATH": os.getenv("PYTHONPATH", ""), - } - load_mpi("openmpi") - current_path = os.path.dirname(os.path.abspath(__file__)) - command = "{} --np {} --hostfile {} {} {} testfile".format( - orterun, processes, hostfile, - os.path.join(current_path, "write_some_data.py"), size) - return process.run(command, timeout, True, False, "both", True, env) - - def get_pool_daos_space(self): - """Get the pool info daos space attributes as a dictionary. - - Returns: - dict: a dictionary of lists of the daos space attributes - - """ - self.get_info() - keys = ("s_total", "s_free") - return {key: getattr(self.info.pi_space.ps_space, key) for key in keys} - - def display_pool_daos_space(self, msg=None): - """Display the pool info daos space attributes. - - Args: - msg (str, optional): optional text to include in the output. - Defaults to None. 
- """ - daos_space = self.get_pool_daos_space() - sizes = [ - "{}[{}]={}".format(key, index, item) - for key in sorted(daos_space.keys()) - for index, item in enumerate(daos_space[key])] - self.log.info( - "Pool %s space%s:\n %s", self.uuid, - " " + msg if isinstance(msg, str) else "", "\n ".join(sizes)) - - def get_pool_rebuild_status(self): - """Get the pool info rebuild status attributes as a dictionary. - - Returns: - dict: a dictionary of lists of the rebuild status attributes - - """ - self.get_info() - keys = ( - "rs_version", "rs_pad_32", "rs_errno", "rs_done", - "rs_toberb_obj_nr", "rs_obj_nr", "rs_rec_nr") - return {key: getattr(self.info.pi_rebuild_st, key) for key in keys} - - def display_pool_rebuild_status(self): - """Display the pool info rebuild status attributes.""" - status = self.get_pool_rebuild_status() - self.log.info( - "Pool rebuild status: %s", - ", ".join( - ["{}={}".format(key, status[key]) for key in sorted(status)])) - - def read_data_during_rebuild(self, container): - """Read data from the container while rebuild is active. 
- - Args: - container (TestContainer): container from which to read data - - Returns: - bool: True if all the data is read sucessfully befoire rebuild - completes; False otherwise - - """ - container.open() - self.log.info( - "Reading objects in container %s during rebuild", self.uuid) - - # Attempt to read all of the data from the container during rebuild - index = 0 - status = read_incomplete = index < len(container.written_data) - while not self.rebuild_complete() and read_incomplete: - try: - status &= container.written_data[index].read_object(container) - except DaosTestError as error: - self.log.error(str(error)) - status = False - index += 1 - read_incomplete = index < len(container.written_data) - - # Verify that all of the container data was read successfully - if read_incomplete: - self.log.error( - "Rebuild completed before all the written data could be read") - status = False - elif not status: - self.log.error("Errors detected reading data during rebuild") - return status - - -class TestContainerData(object): - """A class for storing data written to DaosContainer objects.""" - - def __init__(self, debug=False): - """Create a TestContainerData object. - - Args: - debug (bool, optional): if set log the write/read_record calls. - Defaults to False. - """ - self.obj = None - self.txn = None - self.records = [] - self.log = getLogger(__name__) - self.debug = debug - - def get_akeys(self): - """Get a list of all the akeys currently being used. - - Returns: - list: a list of all the used akeys - """ - return [record["akey"] for record in self.records] - - def get_dkeys(self): - """Get a list of all the dkeys currently being used. - - Returns: - list: a list of all the used dkeys - - """ - return [record["dkey"] for record in self.records] - - def _log_method(self, name, kwargs): - """Log the method call with its arguments. 
- - Args: - name (str): method name - kwargs (dict): dictionary of method arguments - """ - if self.debug: - args = ", ".join( - ["{}={}".format(key, kwargs[key]) for key in sorted(kwargs)]) - self.log.debug(" %s(%s)", name, args) - - def write_record(self, container, akey, dkey, data, rank=None, - obj_class=None): - """Write a record to the container. - - Args: - container (TestContainer): container in which to write the object - akey (str): the akey - dkey (str): the dkey - data (object): the data to write as a string or list - rank (int, optional): rank. Defaults to None. - obj_class (int, optional): daos object class. Defaults to None. - - Raises: - DaosTestError: if there was an error writing the record - """ - self.records.append({"akey": akey, "dkey": dkey, "data": data}) - kwargs = {"dkey": dkey, "akey": akey, "obj": self.obj, "rank": rank} - if obj_class: - kwargs["obj_cls"] = obj_class - try: - if isinstance(data, list): - kwargs["datalist"] = data - self._log_method("write_an_array_value", kwargs) - (self.obj, self.txn) = \ - container.container.write_an_array_value(**kwargs) - else: - kwargs["thedata"] = data - kwargs["size"] = len(data) - self._log_method("write_an_obj", kwargs) - (self.obj, self.txn) = \ - container.container.write_an_obj(**kwargs) - except DaosApiError as error: - raise DaosTestError( - "Error writing {}data (dkey={}, akey={}, data={}) to " - "container {}: {}".format( - "array " if isinstance(data, list) else "", dkey, akey, - data, container.uuid, error)) - - def write_object(self, container, record_qty, akey_size, dkey_size, - data_size, rank=None, obj_class=None, data_array_size=0): - """Write an object to the container. - - Args: - container (TestContainer): container in which to write the object - record_qty (int): the number of records to write - rank (int, optional): rank. Defaults to None. - obj_class (int, optional): daos object class. Defaults to None. - data_array_size (optional): write an array or single value. 
- Defaults to 0. - - Raises: - DaosTestError: if there was an error writing the object - """ - for _ in range(record_qty): - akey = get_random_string(akey_size, self.get_akeys()) - dkey = get_random_string(dkey_size, self.get_dkeys()) - if data_array_size == 0: - data = get_random_string(data_size) - else: - data = [ - get_random_string(data_size) - for _ in range(data_array_size)] - # Write single data to the container - self.write_record(container, akey, dkey, data, rank, obj_class) - # Verify the data was written correctly - data_read = self.read_record( - container, akey, dkey, data_size, data_array_size) - if data != data_read: - raise DaosTestError( - "Written data confirmation failed:" - "\n wrote: {}\n read: {}".format(data, data_read)) - - def read_record(self, container, akey, dkey, data_size, data_array_size=0): - """Read a record from the container. - - Args: - container (TestContainer): container in which to write the object - akey (str): the akey - dkey (str): the dkey - data_size (int): size of the data to read - data_array_size (int): size of array item - - Raises: - DaosTestError: if there was an error reading the object - - Returns: - str: the data read for the container - """ - kwargs = {"dkey": dkey, "akey": akey, "obj": self.obj, "txn": self.txn} - try: - if data_array_size > 0: - kwargs["rec_count"] = data_array_size - kwargs["rec_size"] = data_size + 1 - self._log_method("read_an_array", kwargs) - read_data = container.container.read_an_array(**kwargs) - else: - kwargs["size"] = data_size - self._log_method("read_an_obj", kwargs) - read_data = container.container.read_an_obj(**kwargs) - except DaosApiError as error: - raise DaosTestError( - "Error reading {}data (dkey={}, akey={}, size={}) from " - "container {}: {}".format( - "array " if data_array_size > 0 else "", dkey, akey, - data_size, container.uuid, error)) - return [data[:-1] for data in read_data] \ - if data_array_size > 0 else read_data.value - - def read_object(self, container): 
- """Read an object from the container. - - Args: - container (TestContainer): container from which to read the object - - Returns: - bool: True if ll the records where read successfully and matched - what was originally written; False otherwise - """ - status = len(self.records) > 0 - for record_info in self.records: - expect = record_info["data"] - kwargs = { - "container": container, - "akey": record_info["akey"], - "dkey": record_info["dkey"], - "data_size": len(expect[0].split()), - } - try: - if isinstance(expect, list): - kwargs["data_size"] = len(expect[0]) if expect else 0 - kwargs["data_array_size"] = len(expect) - else: - kwargs["data_size"] = len(expect) - kwargs["data_array_size"] = 0 - actual = self.read_record(**kwargs) - except DaosApiError as error: - container.log.error(error) - status = False - finally: - if actual != expect: - container.log.error( - "Error data mismatch: expected: %s, actual: %s", - expect, actual) - status = False - return status - - -class TestContainer(TestDaosApiBase): - """A class for functional testing of DaosContainer objects.""" - - def __init__(self, pool, cb_handler=None): - """Create a TeestContainer object. - - Args: - pool (TestPool): the test pool in which to create the container - cb_handler (CallbackHandler, optional): callback object to use with - the API methods. Defaults to None. - """ - super(TestContainer, self).__init__("/run/container/*", cb_handler) - self.pool = pool - - self.object_qty = BasicParameter(None) - self.record_qty = BasicParameter(None) - self.akey_size = BasicParameter(None) - self.dkey_size = BasicParameter(None) - self.data_size = BasicParameter(None) - self.data_array_size = BasicParameter(0, 0) - - self.container = None - self.uuid = None - self.info = None - self.opened = False - self.written_data = [] - - @fail_on(DaosApiError) - def create(self, uuid=None): - """Create a container. - - Args: - uuid (str, optional): contianer uuid. Defaults to None. 
- """ - self.destroy() - self.log.info( - "Creating a container with pool handle %s", - self.pool.pool.handle.value) - self.container = DaosContainer(self.pool.context) - kwargs = {"poh": self.pool.pool.handle} - if uuid is not None: - kwargs["con_uuid"] = uuid - self._call_method(self.container.create, kwargs) - self.uuid = self.container.get_uuid_str() - self.log.info(" Container created with uuid %s", self.uuid) - - @fail_on(DaosApiError) - def open(self, pool_handle=None, container_uuid=None): - """Open the container with pool handle and container UUID if provided. - - Args: - pool_handle (TestPool.pool.handle, optional): Pool handle. - Defaults to None. - If you don't provide it, the default pool handle in - DaosContainer will be used. - If you created a TestPool instance and want to use its pool - handle, pass in something like self.pool[-1].pool.handle.value - container_uuid (hex, optional): Container UUID. Defaults to None. - If you want to use certain container's UUID, pass in - something like uuid.UUID(self.container[-1].uuid) - - Returns: - bool: True if the container has been opened; False if the container - is already opened. - - """ - if self.container and not self.opened: - self.log.info("Opening container %s", self.uuid) - kwargs = {} - kwargs["poh"] = pool_handle - kwargs["cuuid"] = container_uuid - self._call_method(self.container.open, kwargs) - self.opened = True - return True - return False - - @fail_on(DaosApiError) - def close(self): - """Close the container. - - Returns: - bool: True if the container has been closed; False if the container - is already closed. - - """ - if self.container and self.opened: - self.log.info("Closing container %s", self.uuid) - self._call_method(self.container.close, {}) - self.opened = False - return True - return False - - @fail_on(DaosApiError) - def destroy(self, force=1): - """Destroy the container. - - Args: - force (int, optional): force flag. Defaults to 1. 
- - Returns: - bool: True if the container has been destroyed; False if the - container does not exist. - - """ - if self.container: - self.close() - self.log.info("Destroying container %s", self.uuid) - if self.container.attached: - self._call_method(self.container.destroy, {"force": force}) - self.container = None - self.uuid = None - self.info = None - self.written_data = [] - return True - return False - - @fail_on(DaosApiError) - def get_info(self, coh=None): - """Query the container for information. - - Sets the self.info attribute. - - Args: - coh (str, optional): container handle override. Defaults to None. - """ - if self.container: - self.open() - self.log.info("Querying container %s", self.uuid) - self._call_method(self.container.query, {"coh": coh}) - self.info = self.container.info - - def check_container_info(self, ci_uuid=None, es_hce=None, es_lre=None, - es_lhe=None, es_ghce=None, es_glre=None, - es_ghpce=None, ci_nsnapshots=None, - ci_min_slipped_epoch=None): - # pylint: disable=unused-argument - """Check the container info attributes. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. - - Args: - ci_uuid (str, optional): container uuid. Defaults to None. - es_hce (int, optional): hc epoch?. Defaults to None. - es_lre (int, optional): lr epoch?. Defaults to None. - es_lhe (int, optional): lh epoch?. Defaults to None. - es_ghce (int, optional): ghc epoch?. Defaults to None. - es_glre (int, optional): glr epoch?. Defaults to None. - es_ghpce (int, optional): ghpc epoch?. Defaults to None. - ci_nsnapshots (int, optional): number of snapshots. - Defaults to None. - ci_min_slipped_epoch (int, optional): . Defaults to None. - - Note: - Arguments may also be provided as a string with a number preceeded - by '<', '<=', '>', or '>=' for other comparisions besides the - default '=='. 
- - Returns: - bool: True if at least one expected value is specified and all the - specified values match; False otherwise - - """ - self.get_info() - checks = [ - (key, - c_uuid_to_str(getattr(self.info, key)) - if key == "ci_uuid" else getattr(self.info, key), - val) - for key, val in locals().items() - if key != "self" and val is not None] - return self._check_info(checks) - - @fail_on(DaosTestError) - def write_objects(self, rank=None, obj_class=None, debug=False): - """Write objects to the container. - - Args: - rank (int, optional): server rank. Defaults to None. - obj_class (int, optional): daos object class. Defaults to None. - debug (bool, optional): log the record write/read method calls. - Defaults to False. - - Raises: - DaosTestError: if there was an error writing the object - """ - self.open() - self.log.info( - "Writing %s object(s), with %s record(s) of %s bytes(s) each, in " - "container %s%s%s", - self.object_qty.value, self.record_qty.value, self.data_size.value, - self.uuid, " on rank {}".format(rank) if rank is not None else "", - " with object class {}".format(obj_class) - if obj_class is not None else "") - for _ in range(self.object_qty.value): - self.written_data.append(TestContainerData(debug)) - self.written_data[-1].write_object( - self, self.record_qty.value, self.akey_size.value, - self.dkey_size.value, self.data_size.value, rank, obj_class, - self.data_array_size.value) - - @fail_on(DaosTestError) - def read_objects(self, debug=False): - """Read the objects from the container and verify they match. - - Args: - debug (bool, optional): log the record read method calls. Defaults - to False. 
- - Returns: - bool: True if - """ - self.open() - self.log.info( - "Reading %s object(s) in container %s", - len(self.written_data), self.uuid) - status = len(self.written_data) > 0 - for data in self.written_data: - data.debug = debug - status &= data.read_object(self) - return status - - def execute_io(self, duration, rank=None, obj_class=None, debug=False): - """Execute writes and reads for the specified time period. - - Args: - duration (int): how long, in seconds, to write and read data - rank (int, optional): server rank. Defaults to None. - obj_class (int, optional): daos object class. Defaults to None. - debug (bool, optional): log the record write/read method calls. - Defaults to False. - - Returns: - int: number of bytes written to the container - - Raises: - DaosTestError: if there is an error writing, reading, or verify the - data - - """ - self.open() - self.log.info( - "Writing and reading objects in container %s for %s seconds", - self.uuid, duration) - - total_bytes_written = 0 - finish_time = time() + duration - while time() < finish_time: - self.written_data.append(TestContainerData(debug)) - self.written_data[-1].write_object( - self, 1, self.akey_size.value, self.dkey_size.value, - self.data_size.value, rank, obj_class) - total_bytes_written += self.data_size.value - return total_bytes_written - - def get_target_rank_lists(self, message=""): - """Get a list of lists of target ranks from each written object. - - Args: - message (str, optional): message to include in the target rank list - output. Defaults to "". 
- - Raises: - DaosTestError: if there is an error obtaining the target rank list - from the DaosObj - - Returns: - list: a list of list of targets for each written object in this - container - - """ - self.open() - target_rank_lists = [] - for data in self.written_data: - try: - data.obj.get_layout() - # Convert the list of longs into a list of ints - target_rank_lists.append( - [int(rank) for rank in data.obj.tgt_rank_list]) - except DaosApiError as error: - raise DaosTestError( - "Error obtaining target rank list for object {} in " - "container {}: {}".format(data.obj, self.uuid, error)) - if message is not None: - self.log.info("Target rank lists%s:", message) - for ranks in target_rank_lists: - self.log.info(" %s", ranks) - return target_rank_lists - - def get_target_rank_count(self, rank, target_rank_list): - """Get the number of times a rank appears in the target rank list. - - Args: - rank (int): the rank to count. Defaults to None. - target_rank_list (list): a list of lists of target ranks per object - - Returns: - (int): the number of object rank lists containing the rank - - """ - count = sum([ranks.count(rank) for ranks in target_rank_list]) - self.log.info( - "Occurrences of rank %s in the target rank list: %s", rank, count) - return count - - def punch_objects(self, indices): - """Punch committed objects from the container. 
- - Args: - indices (list): list of index numbers indicating which written - objects to punch from the self.written_data list - - Raises: - DaosTestError: if there is an error punching the object or - determining which objec to punch - - Returns: - int: number of successfully punched objects - - """ - self.open() - self.log.info( - "Punching %s objects from container %s with %s written objects", - len(indices), self.uuid, len(self.written_data)) - count = 0 - if len(self.written_data) > 0: - for index in indices: - # Find the object to punch at the specified index - txn = 0 - try: - obj = self.written_data[index].obj - except IndexError: - raise DaosTestError( - "Invalid index {} for written data".format(index)) - - # Close the object - self.log.info( - "Closing object (index: %s, txn: %s) in container %s", - index, txn, self.uuid) - try: - self._call_method(obj.close, {}) - except DaosApiError as error: - self.log.error(" Error: %s", str(error)) - continue - - # Punch the object - self.log.info( - "Punching object (index: %s, txn: %s) from container %s", - index, txn, self.uuid) - try: - self._call_method(obj.punch, {"txn": txn}) - count += 1 - except DaosApiError as error: - self.log.error(" Error: %s", str(error)) - - # Retutrn the number of punched objects - return count diff --git a/src/tests/ftest/util/test_utils_container.py b/src/tests/ftest/util/test_utils_container.py index 339bc2ccea7..0c774f24e22 100644 --- a/src/tests/ftest/util/test_utils_container.py +++ b/src/tests/ftest/util/test_utils_container.py @@ -308,12 +308,14 @@ def create(self, uuid=None, con_in=None): kwargs["con_uuid"] = uuid # Refer daos_api for setting input params for DaosContainer. 
if con_in is not None: - self.input_params.type = con_in[0] - self.input_params.enable_chksum = con_in[1] - self.input_params.srv_verify = con_in[2] - self.input_params.chksum_type = con_in[3] - self.input_params.chunk_size = con_in[4] - kwargs["con_prop"] = self.input_params + cop = self.input_params.get_con_create_params() + cop.type = con_in[0] + cop.enable_chksum = con_in[1] + cop.srv_verify = con_in[2] + cop.chksum_type = con_in[3] + cop.chunk_size = con_in[4] + kwargs["con_prop"] = cop + self._call_method(self.container.create, kwargs) self.uuid = self.container.get_uuid_str() self.log.info(" Container created with uuid %s", self.uuid) diff --git a/src/tests/suite/SConscript b/src/tests/suite/SConscript index 5ddb1bf2f6b..70ca3cb50ca 100644 --- a/src/tests/suite/SConscript +++ b/src/tests/suite/SConscript @@ -1,6 +1,60 @@ """Build test suite""" +import subprocess +""" hack to handle old subprocess version """ +try: + from subprocess import DEVNULL +except ImportError: + import os + DEVNULL = open(os.devnull, "wb") + import daos_build +test_cmocka_skip = """ +#include +#include +#include +#include + +static void +test(void **state) { skip(); } + +int main(int argc, char **argv) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(test), + cmocka_unit_test(test), + }; + return cmocka_run_group_tests(tests, NULL, NULL); +} +""" + +def CheckCmockaSkip(context): + context.Message('Checking if cmocka skip() bug is present ... ') + rc = context.TryCompile(test_cmocka_skip, '.c') + if rc == 0: + print(" (Compile failed) assuming ", + context.Result(not rc)) + return rc + rc = context.TryLink(test_cmocka_skip, '.c') + if rc == 0: + print(" (Link failed) assuming ", + context.Result(not rc)) + return rc + prog = context.lastTarget + pname = prog.get_abspath() + rc = subprocess.call(pname, env={"CMOCKA_TEST_ABORT": "1"}, stdout=DEVNULL, + stderr=DEVNULL) + """ in case of abort rc is -6 instead of 134 (128+6) with shell ... 
""" + if rc == -6: + print(" (Bug reproduced) ", context.Result(rc)) + else: + if rc != 0: + print(" (Other error than bug) assuming ", context.Result(rc)) + else: + print(" (Bug not reproduced) ", context.Result(rc)) + """ return 0 means error """ + return not rc + def scons(): """Execute build""" Import('denv') @@ -19,7 +73,18 @@ def scons(): daos_test_obj = denv.SharedObject(['daos_obj.c']) Export('daos_test_obj') - test = daos_build.program(denv, 'daos_test', Glob('*.c'), LIBS=libraries) + newenv = denv.Clone() + conf = Configure(newenv, custom_tests = {'CheckCmockaSkip' : CheckCmockaSkip}) + conf.env.AppendUnique(LIBS=['cmocka']) + if not conf.CheckCmockaSkip(): + """ it would be cool to be able to check exit code is effectively 134 + (for abort() upon skip() bug) but in all error cases we should + decide to use workaround """ + conf.env.AppendUnique(CCFLAGS=['-DOVERRIDE_CMOCKA_SKIP']) + print("libcmocka with broken skip(), using workaround (DAOS-1093).") + newenv = conf.Finish() + + test = daos_build.program(newenv, 'daos_test', Glob('*.c'), LIBS=libraries) denv.Install('$PREFIX/bin/', test) denv.Install('$PREFIX/bin/io_conf', Glob('io_conf/daos_io_conf_1')) denv.Install('$PREFIX/bin/io_conf', Glob('io_conf/daos_io_conf_2')) diff --git a/src/tests/suite/daos_rebuild.c b/src/tests/suite/daos_rebuild.c index 4ead74677a3..47ab970455f 100644 --- a/src/tests/suite/daos_rebuild.c +++ b/src/tests/suite/daos_rebuild.c @@ -1263,7 +1263,7 @@ rebuild_multiple_tgts(void **state) daos_obj_id_t oid; struct daos_obj_layout *layout; d_rank_t leader; - d_rank_t exclude_ranks[2]; + d_rank_t exclude_ranks[2] = { 0 }; int i; if (!test_runable(arg, 6)) diff --git a/src/tests/suite/daos_test.h b/src/tests/suite/daos_test.h index 5876603312c..b11abe80f41 100644 --- a/src/tests/suite/daos_test.h +++ b/src/tests/suite/daos_test.h @@ -39,6 +39,7 @@ #include #include +#ifdef OVERRIDE_CMOCKA_SKIP /* redefine cmocka's skip() so it will no longer abort() * if CMOCKA_TEST_ABORT=1 * @@ 
-54,6 +55,7 @@ _skip(__FILE__, __LINE__); \ return; \ } while (0) +#endif #include #include diff --git a/src/tests/suite/dfs_test.c b/src/tests/suite/dfs_test.c index 01d89137f28..24bd58ca297 100644 --- a/src/tests/suite/dfs_test.c +++ b/src/tests/suite/dfs_test.c @@ -554,6 +554,44 @@ dfs_test_cond(void **state) MPI_Barrier(MPI_COMM_WORLD); } +static void +dfs_test_syml(void **state) +{ + dfs_obj_t *sym; + char *filename = "syml_file"; + char *val = "SYMLINK VAL 1"; + char tmp_buf[64]; + struct stat stbuf; + daos_size_t size = 0; + int rc, op_rc; + + op_rc = dfs_open(dfs_mt, NULL, filename, S_IFLNK | S_IWUSR | S_IRUSR, + O_RDWR | O_CREAT | O_EXCL, 0, 0, val, &sym); + rc = check_one_success(op_rc, EEXIST, MPI_COMM_WORLD); + assert_int_equal(rc, 0); + if (op_rc != 0) + goto syml_stat; + + rc = dfs_get_symlink_value(sym, NULL, &size); + assert_int_equal(rc, 0); + assert_int_equal(size, strlen(val)+1); + + rc = dfs_get_symlink_value(sym, tmp_buf, &size); + assert_int_equal(rc, 0); + assert_int_equal(size, strlen(val) + 1); + assert_string_equal(val, tmp_buf); + + rc = dfs_release(sym); + assert_int_equal(rc, 0); + +syml_stat: + rc = dfs_stat(dfs_mt, NULL, filename, &stbuf); + assert_int_equal(rc, 0); + assert_int_equal(stbuf.st_size, strlen(val)); + + MPI_Barrier(MPI_COMM_WORLD); +} + static const struct CMUnitTest dfs_tests[] = { { "DFS_TEST1: DFS mount / umount", dfs_test_mount, async_disable, test_case_teardown}, @@ -563,6 +601,8 @@ static const struct CMUnitTest dfs_tests[] = { dfs_test_read_shared_file, async_disable, test_case_teardown}, { "DFS_TEST4: Conditional OPs", dfs_test_cond, async_disable, test_case_teardown}, + { "DFS_TEST5: Simple Symlinks", + dfs_test_syml, async_disable, test_case_teardown}, }; static int diff --git a/src/utils/daos.c b/src/utils/daos.c index c717667a650..0240c631370 100644 --- a/src/utils/daos.c +++ b/src/utils/daos.c @@ -1071,8 +1071,63 @@ print_oclass_names_list(FILE *stream) free(str); } +#define FIRST_LEVEL_HELP() \ +do { \ + 
fprintf(stream, \ + "usage: daos RESOURCE COMMAND [OPTIONS]\n" \ + "resources:\n" \ + " pool pool\n" \ + " container (cont) container\n" \ + " object (obj) object\n" \ + " version print command version\n" \ + " help print this message and exit\n"); \ + fprintf(stream, "\n"); \ + fprintf(stream, "use 'daos help RESOURCE' for resource specifics\n"); \ +} while (0) + +#define ALL_CONT_CMDS_HELP() \ +do { \ + fprintf(stream, "\n" \ + "container (cont) commands:\n" \ + " create create a container\n" \ + " destroy destroy a container\n" \ + " list-objects list all objects in container\n" \ + " list-obj\n" \ + " query query a container\n" \ + " get-prop get all container's properties\n" \ + " set-prop set container's properties\n" \ + " get-acl get a container's ACL\n" \ + " overwrite-acl replace a container's ACL\n" \ + " update-acl add/modify entries in a container's ACL\n" \ + " delete-acl delete an entry from a container's ACL\n" \ + " set-owner change the user and/or group that own a container\n" \ + " stat get container statistics\n" \ + " list-attrs list container user-defined attributes\n" \ + " del-attr delete container user-defined attribute\n" \ + " get-attr get container user-defined attribute\n" \ + " set-attr set container user-defined attribute\n" \ + " create-snap create container snapshot (optional name)\n" \ + " at most recent committed epoch\n" \ + " list-snaps list container snapshots taken\n" \ + " destroy-snap destroy container snapshots\n" \ + " by name, epoch or range\n" \ + " rollback roll back container to specified snapshot\n"); \ + fprintf(stream, "\n"); \ + fprintf(stream, "use 'daos help cont|container COMMAND' for command specific options\n"); \ +} while (0) + +#define ALL_BUT_CONT_CREATE_OPTS_HELP() \ +do { \ + fprintf(stream, \ + "container options (query, and all commands except create):\n" \ + " with --cont use: (--pool, --sys-name, --svc)\n" \ + " with --path use: (--sys-name, --svc)\n" \ + " --cont=UUID (mandatory, or use --path)\n" \ 
+ " --path=PATHSTR (mandatory, or use --cont)\n"); \ +} while (0) + static int -help_hdlr(struct cmd_args_s *ap) +help_hdlr(int argc, char *argv[], struct cmd_args_s *ap) { FILE *stream; @@ -1082,152 +1137,145 @@ help_hdlr(struct cmd_args_s *ap) fprintf(stream, "daos command (v%s)\n", DAOS_VERSION); - fprintf(stream, -"usage: daos RESOURCE COMMAND [OPTIONS]\n" -"resources:\n" -" pool pool\n" -" container (cont) container\n" -" version print command version\n" -" help print this message and exit\n"); - - fprintf(stream, "\n" -"pool commands:\n" -" list-containers list all containers in pool\n" -" list-cont\n" -" query query a pool\n" -" stat get pool statistics\n" -" list-attrs list pool user-defined attributes\n" -" get-attr get pool user-defined attribute\n"); - - fprintf(stream, -"pool options:\n" -" --pool=UUID pool UUID\n" -" --sys-name=STR DAOS system name context for servers (\"%s\")\n" -" --sys=STR\n" -" --svc=RANKS pool service replicas like 1,2,3\n" -" --attr=NAME pool attribute name to get\n", - default_sysname); - - fprintf(stream, "\n" -"container (cont) commands:\n" -" create create a container\n" -" destroy destroy a container\n" -" list-objects list all objects in container\n" -" list-obj\n" -" query query a container\n" -" get-prop get a container's properties\n" -" set-prop set a container's properties\n" -" get-acl get a container's ACL\n" -" overwrite-acl replace a container's ACL\n" -" update-acl add/modify entries in a container's ACL\n" -" delete-acl delete an entry from a container's ACL\n" -" set-owner change the user and/or group that own a container\n" -" stat get container statistics\n" -" list-attrs list container user-defined attributes\n" -" del-attr delete container user-defined attribute\n" -" get-attr get container user-defined attribute\n" -" set-attr set container user-defined attribute\n" -" create-snap create container snapshot (optional name)\n" -" at most recent committed epoch\n" -" list-snaps list container snapshots 
taken\n" -" destroy-snap destroy container snapshots\n" -" by name, epoch or range\n" -" rollback roll back container to specified snapshot\n"); - -#if 0 - fprintf(stream, -"container (cont) options:\n" -" (--pool, --sys-name, --svc)\n" -" --cont=UUID container UUID\n" -" --attr=NAME container attribute name to set, get, del\n" -" --value=VALUESTR container attribute value to set\n" -" --path=PATHSTR container namespace path\n" -" --type=CTYPESTR container type (HDF5, POSIX)\n" -" --oclass=OCLSSTR container object class\n" -" (tiny, small, large, R2, R2S, repl_max)\n" -" --chunk_size=BYTES chunk size of files created. Supports suffixes:\n" -" K (KB), M (MB), G (GB), T (TB), P (PB), E (EB)\n" -" --snap=NAME container snapshot (create/destroy-snap, rollback)\n" -" --epc=EPOCHNUM container epoch (destroy-snap, rollback)\n" -" --eprange=B-E container epoch range (destroy-snap)\n" -" --force destroy container regardless of state\n"); -#endif - - fprintf(stream, -"container options (create by UUID):\n" -" (--pool, --sys-name, --svc)\n" -" --cont=UUID (optional) container UUID (or generated)\n" -"container options (create and link to namespace path):\n" -" (--pool, --sys-name, --svc, --cont [optional])\n" -" --path=PATHSTR container namespace path\n" -" --type=CTYPESTR container type (HDF5, POSIX)\n" -" --oclass=OCLSSTR container object class\n" -" ("); - /* vs hardcoded list like "tiny, small, large, R2, R2S, repl_max" */ - print_oclass_names_list(stream); - fprintf(stream, ")\n" -" --chunk_size=BYTES chunk size of files created. 
Supports suffixes:\n" -" K (KB), M (MB), G (GB), T (TB), P (PB), E (EB)\n" -" --properties=:[,:,...]\n" -" supported prop names are label, cksum,\n" -" cksum_size, srv_cksum, rf\n" -" label value can be any string\n" -" cksum supported values are off, crc[16,32,64], sha1\n" -" cksum_size can be any size\n" -" srv_cksum values can be on, off\n" -" rf supported values are [0-4]\n" -" --acl-file=PATH input file containing ACL\n" -" --user=ID user who will own the container.\n" -" format: username@[domain]\n" -" default is the effective user\n" -" --group=ID group who will own the container.\n" -" format: groupname@[domain]\n" -" default is the effective group\n" -"container options (destroy):\n" -" --force destroy container regardless of state\n" -"container options (query, and all commands except create):\n" -" with --cont use: (--pool, --sys-name, --svc)\n" -" with --path use: (--sys-name, --svc)\n" -" --cont=UUID (mandatory, or use --path)\n" -" --path=PATHSTR (mandatory, or use --cont)\n" -"container options (attribute-related):\n" -" --attr=NAME container attribute name to set, get, del\n" -" --value=VALUESTR container attribute value to set\n" -"container options (snapshot and rollback-related):\n" -" --snap=NAME container snapshot (create/destroy-snap, rollback)\n" -" --epc=EPOCHNUM container epoch (destroy-snap, rollback)\n" -" --eprange=B-E container epoch range (destroy-snap)\n" -"container options (set-prop):\n" -" --properties=:[,:,...]\n" -" supported prop names: label\n" -" label value can be any string\n" -"container options (ACL-related):\n" -" --acl-file=PATH input file containing ACL (overwrite-acl, " -" update-acl)\n" -" --entry=ACE add or modify a single ACL entry (update-acl)\n" -" --principal=ID principal of entry (delete-acl)\n" -" for users: u:name@[domain]\n" -" for groups: g:name@[domain]\n" -" special principals: OWNER@, GROUP@, EVERYONE@\n" -" --verbose verbose mode (get-acl)\n" -" --outfile=PATH write ACL to file (get-acl)\n" -"container 
options (set-owner):\n" -" --user=ID user who will own the container.\n" -" format: username@[domain]\n" -" --group=ID group who will own the container.\n" -" format: groupname@[domain]\n"); - - fprintf(stream, "\n" -"object (obj) commands:\n" -" query query an object's layout\n" -" list-keys list an object's keys\n" -" dump dump an object's contents\n"); - - fprintf(stream, -"object (obj) options:\n" -" (--pool, --sys-name, --svc)\n" -" (--cont)\n" -" --oid=HI.LO object ID\n"); + if (argc == 2) { + FIRST_LEVEL_HELP(); + } else if (strcmp(argv[2], "pool") == 0) { + fprintf(stream, "\n" + "pool commands:\n" + " list-containers list all containers in pool\n" + " list-cont\n" + " query query a pool\n" + " stat get pool statistics\n" + " list-attrs list pool user-defined attributes\n" + " get-attr get pool user-defined attribute\n"); + + fprintf(stream, + "pool options:\n" + " --pool=UUID pool UUID\n" + " --sys-name=STR DAOS system name context for servers (\"%s\")\n" + " --sys=STR\n" + " --svc=RANKS pool service replicas like 1,2,3\n" + " --attr=NAME pool attribute name to get\n", + default_sysname); + + } else if (strcmp(argv[2], "container") == 0 || + strcmp(argv[2], "cont") == 0) { + if (argc == 3) { + ALL_CONT_CMDS_HELP(); + } else if (strcmp(argv[3], "create") == 0) { + fprintf(stream, + "container options (create by UUID):\n" + " (--pool, --sys-name, --svc)\n" + " --cont=UUID (optional) container UUID (or generated)\n" + "container options (create and link to namespace path):\n" + " (--pool, --sys-name, --svc, --cont [optional])\n" + " --path=PATHSTR container namespace path\n" + "container create common optional options:\n" + " --type=CTYPESTR container type (HDF5, POSIX)\n" + " --oclass=OCLSSTR container object class\n" + " ("); + /* vs hardcoded list like "tiny, small, large, R2, R2S, repl_max" */ + print_oclass_names_list(stream); + fprintf(stream, ")\n" + " --chunk_size=BYTES chunk size of files created. 
Supports suffixes:\n" + " K (KB), M (MB), G (GB), T (TB), P (PB), E (EB)\n" + " --properties=:[,:,...]\n" + " supported prop names are label, cksum,\n" + " cksum_size, srv_cksum, rf\n" + " label value can be any string\n" + " cksum supported values are off, crc[16,32,64], sha1\n" + " cksum_size can be any size\n" + " srv_cksum values can be on, off\n" + " rf supported values are [0-4]\n" + " --acl-file=PATH input file containing ACL\n" + " --user=ID user who will own the container.\n" + " format: username@[domain]\n" + " default is the effective user\n" + " --group=ID group who will own the container.\n" + " format: groupname@[domain]\n" + " default is the effective group\n"); + } else if (strcmp(argv[3], "destroy") == 0) { + fprintf(stream, + "container options (destroy):\n" + " --force destroy container regardless of state\n"); + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else if (strcmp(argv[3], "get-attr") == 0 || + strcmp(argv[3], "set-attr") == 0 || + strcmp(argv[3], "del-attr") == 0) { + fprintf(stream, + "container options (attribute-related):\n" + " --attr=NAME container attribute name to set, get, del\n" + " --value=VALUESTR container attribute value to set\n"); + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else if (strcmp(argv[3], "create-snap") == 0 || + strcmp(argv[3], "destroy-snap") == 0 || + strcmp(argv[3], "rollback") == 0) { + fprintf(stream, + "container options (snapshot and rollback-related):\n" + " --snap=NAME container snapshot (create/destroy-snap, rollback)\n" + " --epc=EPOCHNUM container epoch (destroy-snap, rollback)\n" + " --eprange=B-E container epoch range (destroy-snap)\n"); + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else if (strcmp(argv[3], "set-prop") == 0) { + fprintf(stream, + "container options (set-prop):\n" + " --properties=:[,:,...]\n" + " supported prop names: label\n" + " label value can be any string\n"); + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else if (strcmp(argv[3], "get-acl") == 0 || + strcmp(argv[3], "overwrite-acl") == 0 || + 
strcmp(argv[3], "update-acl") == 0 || + strcmp(argv[3], "delete-acl") == 0) { + fprintf(stream, + "container options (ACL-related):\n" + " --acl-file=PATH input file containing ACL (overwrite-acl, " + " update-acl)\n" + " --entry=ACE add or modify a single ACL entry (update-acl)\n" + " --principal=ID principal of entry (delete-acl)\n" + " for users: u:name@[domain]\n" + " for groups: g:name@[domain]\n" + " special principals: OWNER@, GROUP@, EVERYONE@\n" + " --verbose verbose mode (get-acl)\n" + " --outfile=PATH write ACL to file (get-acl)\n"); + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else if (strcmp(argv[3], "set-owner") == 0) { + fprintf(stream, + "container options (set-owner):\n" + " --user=ID user who will own the container.\n" + " format: username@[domain]\n" + " --group=ID group who will own the container.\n" + " format: groupname@[domain]\n"); + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else if (strcmp(argv[3], "list-objects") == 0 || + strcmp(argv[3], "list-obj") == 0 || + strcmp(argv[3], "query") == 0 || + strcmp(argv[3], "get-prop") == 0 || + strcmp(argv[3], "stat") == 0 || + strcmp(argv[3], "list-attrs") == 0 || + strcmp(argv[3], "list-snaps") == 0) { + ALL_BUT_CONT_CREATE_OPTS_HELP(); + } else { + ALL_CONT_CMDS_HELP(); + } + } else if (strcmp(argv[2], "obj") == 0 || + strcmp(argv[2], "object") == 0) { + fprintf(stream, "\n" + "object (obj) commands:\n" + " query query an object's layout\n" + " list-keys list an object's keys\n" + " dump dump an object's contents\n"); + + fprintf(stream, + "object (obj) options:\n" + " (--pool, --sys-name, --svc)\n" + " (--cont)\n" + " --oid=HI.LO object ID\n"); + + } else { + FIRST_LEVEL_HELP(); + } + return 0; } @@ -1241,9 +1289,11 @@ main(int argc, char *argv[]) /* argv[1] is RESOURCE or "help" or "version"; * argv[2] if provided is a resource-specific command */ - if (argc < 2 || strcmp(argv[1], "help") == 0) - hdlr = help_hdlr; - else if (strcmp(argv[1], "version") == 0) { + if (argc < 2 || strcmp(argv[1], "help") == 0) { 
+ dargs.ostream = stdout; + help_hdlr(argc, argv, &dargs); + return 0; + } else if (strcmp(argv[1], "version") == 0) { fprintf(stdout, "daos version %s\n", DAOS_VERSION); return 0; } else if ((strcmp(argv[1], "container") == 0) || @@ -1257,16 +1307,10 @@ main(int argc, char *argv[]) if (hdlr == NULL) { dargs.ostream = stderr; - help_hdlr(&dargs); + help_hdlr(argc, argv, &dargs); return 2; } - if (hdlr == help_hdlr) { - dargs.ostream = stdout; - help_hdlr(&dargs); - return 0; - } - rc = daos_init(); if (rc != 0) { fprintf(stderr, "failed to initialize daos: %d\n", rc); @@ -1279,7 +1323,7 @@ main(int argc, char *argv[]) fprintf(stderr, "error parsing command line arguments\n"); if (rc > 0) { dargs.ostream = stderr; - help_hdlr(&dargs); + help_hdlr(argc, argv, &dargs); } daos_fini(); return -1; @@ -1298,7 +1342,7 @@ main(int argc, char *argv[]) else if (rc > 0) { printf("rc: %d\n", rc); dargs.ostream = stderr; - help_hdlr(&dargs); + help_hdlr(argc, argv, &dargs); return 2; } diff --git a/src/vos/SConscript b/src/vos/SConscript index 19da61dbb5c..a6ca0c1ef06 100644 --- a/src/vos/SConscript +++ b/src/vos/SConscript @@ -6,7 +6,8 @@ FILES = ["evt_iter.c", "vos_common.c", "vos_iterator.c", "vos_io.c", "vos_pool.c", "vos_aggregate.c", "vos_container.c", "vos_obj.c", "vos_obj_cache.c", "vos_obj_index.c", "vos_tree.c", "evtree.c", "vos_dtx.c", "vos_dtx_cos.c", "vos_query.c", "vos_overhead.c", - "vos_dtx_iter.c", "vos_gc.c", "vos_ilog.c", "ilog.c", "vos_ts.c"] + "vos_dtx_iter.c", "vos_gc.c", "vos_ilog.c", "ilog.c", "vos_ts.c", + "lru_array.c"] def build_vos(env, standalone): """build vos""" diff --git a/src/vos/ilog.c b/src/vos/ilog.c index b0a0f8fd68a..1344ae8b16a 100644 --- a/src/vos/ilog.c +++ b/src/vos/ilog.c @@ -630,9 +630,6 @@ ilog_destroy(struct umem_instance *umm, fail: rc = ilog_tx_end(&lctx, rc); - if (rc == 0) - vos_ts_evict(&lctx.ic_root->lr_ts_idx); - return rc; } diff --git a/src/vos/lru_array.c b/src/vos/lru_array.c new file mode 100644 index 
00000000000..610e4dcfc44 --- /dev/null +++ b/src/vos/lru_array.c @@ -0,0 +1,175 @@ +/** + * (C) Copyright 2020 Intel Corporation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE + * The Government's rights to use, modify, reproduce, release, perform, display, + * or disclose this software are subject to the terms of the Apache License as + * provided in Contract No. B609815. + * Any reproduction of computer software, computer software documentation, or + * portions thereof marked with this legend must also reproduce the markings. 
+ */ +/** + * LRU array implementation + * vos/lru_array.c + * + * Author: Jeff Olivier + */ +#include "lru_array.h" + +static void +evict_cb(struct lru_array *array, struct lru_entry *entry, uint32_t idx) +{ + if (array->la_cbs.lru_on_evict == NULL) + return; + + array->la_cbs.lru_on_evict(entry->le_payload, idx, array->la_arg); +} + +static void +init_cb(struct lru_array *array, struct lru_entry *entry, uint32_t idx) +{ + if (array->la_cbs.lru_on_init == NULL) + return; + + array->la_cbs.lru_on_init(entry->le_payload, idx, array->la_arg); +} + +static void +fini_cb(struct lru_array *array, struct lru_entry *entry, uint32_t idx) +{ + if (array->la_cbs.lru_on_fini == NULL) + return; + + array->la_cbs.lru_on_fini(entry->le_payload, idx, array->la_arg); +} + +void +lrua_evict_lru(struct lru_array *array, struct lru_entry **entryp, + uint32_t *idx, bool evict_lru) +{ + struct lru_entry *entry; + + *entryp = NULL; + + entry = &array->la_table[array->la_lru]; + + if (entry->le_record_idx != NULL) { + if (!evict_lru) + return; /* Caller has not set eviction flag */ + + evict_cb(array, entry, array->la_lru); + } + + *idx = array->la_lru; + entry->le_record_idx = idx; + array->la_lru = entry->le_next_idx; + array->la_mru = *idx; + + *entryp = entry; +} + +void +lrua_evict(struct lru_array *array, uint32_t *idx) +{ + struct lru_entry *entry; + uint32_t tidx; + + D_ASSERT(array != NULL); + D_ASSERT(idx != NULL && *idx < array->la_count); + tidx = *idx; + + entry = &array->la_table[tidx]; + if (idx != entry->le_record_idx) + return; + + evict_cb(array, entry, tidx); + + entry->le_record_idx = NULL; + + if (array->la_mru == tidx) + array->la_mru = entry->le_prev_idx; + + if (array->la_lru == tidx) + return; + + /** Remove from current location */ + lrua_remove_entry(&array->la_table[0], entry); + + /** Add at the LRU */ + lrua_insert_entry(&array->la_table[0], entry, tidx, array->la_mru, + array->la_lru); + + array->la_lru = tidx; +} + +int +lrua_array_alloc(struct lru_array 
**arrayp, uint32_t nr_ent, + uint32_t record_size, const struct lru_callbacks *cbs, + void *arg) +{ + struct lru_array *array; + struct lru_entry *current; + uint32_t aligned_size; + uint32_t cur_idx; + uint32_t next_idx; + uint32_t prev_idx; + + aligned_size = (record_size + 7) & ~7; + + *arrayp = NULL; + + D_ALLOC(array, sizeof(*array) + + (sizeof(array->la_table[0]) + aligned_size) * nr_ent); + if (array == NULL) + return -DER_NOMEM; + + prev_idx = array->la_mru = nr_ent - 1; + array->la_arg = arg; + array->la_lru = 0; + array->la_count = nr_ent; + array->la_record_size = aligned_size; + array->la_payload = &array->la_table[nr_ent]; + if (cbs != NULL) + array->la_cbs = *cbs; + cur_idx = 0; + for (cur_idx = 0; cur_idx < nr_ent; cur_idx++) { + next_idx = (cur_idx + 1) % nr_ent; + current = &array->la_table[cur_idx]; + current->le_payload = array->la_payload + + (aligned_size * cur_idx); + current->le_next_idx = next_idx; + current->le_prev_idx = prev_idx; + prev_idx = cur_idx; + init_cb(array, current, cur_idx); + } + + *arrayp = array; + + return 0; +} + +void +lrua_array_free(struct lru_array *array) +{ + int i; + + if (array == NULL) + return; + + for (i = 0; i < array->la_count; i++) + fini_cb(array, &array->la_table[i], i); + + D_FREE(array); +} diff --git a/src/vos/lru_array.h b/src/vos/lru_array.h new file mode 100644 index 00000000000..1fc782dece2 --- /dev/null +++ b/src/vos/lru_array.h @@ -0,0 +1,235 @@ +/** + * (C) Copyright 2020 Intel Corporation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE + * The Government's rights to use, modify, reproduce, release, perform, display, + * or disclose this software are subject to the terms of the Apache License as + * provided in Contract No. B609815. + * Any reproduction of computer software, computer software documentation, or + * portions thereof marked with this legend must also reproduce the markings. + */ +/** + * Generic struct for allocating LRU entries in an array + * common/lru_array.h + * + * Author: Jeff Olivier + */ + +#ifndef __LRU_ARRAY__ +#define __LRU_ARRAY__ + +#include + +struct lru_callbacks { + /** Called when an entry is going to be evicted from cache */ + void (*lru_on_evict)(void *entry, uint32_t idx, void *arg); + /** Called on initialization of an entry */ + void (*lru_on_init)(void *entry, uint32_t idx, void *arg); + /** Called on finalization of an entry */ + void (*lru_on_fini)(void *entry, uint32_t idx, void *arg); +}; + +struct lru_entry { + /** The pointer to the index is unique identifier for the entry */ + uint32_t *le_record_idx; + /** Pointer to this entry */ + void *le_payload; + /** Next index in LRU array */ + uint32_t le_next_idx; + /** Previous index in LRU array */ + uint32_t le_prev_idx; +}; + +struct lru_array { + /** Least recently accessed index */ + uint32_t la_lru; + /** Most recently accessed index */ + uint32_t la_mru; + /** Number of indices */ + uint32_t la_count; + /** record size */ + uint32_t la_record_size; + /** Allocated payload entries */ + void *la_payload; + /** Callbacks for implementation */ + struct lru_callbacks la_cbs; + /** User callback argument passed on init */ + void *la_arg; + /** Entries in the array */ + struct lru_entry la_table[0]; +}; + +/** Internal API: Evict the LRU, move it to MRU, invoke eviction callback, + * and return the index + */ +void +lrua_evict_lru(struct lru_array 
*array, struct lru_entry **entry, + uint32_t *idx, bool evict_lru); + +/** Internal API: Remove an entry from the lru list */ +static inline void +lrua_remove_entry(struct lru_entry *entries, struct lru_entry *entry) +{ + struct lru_entry *prev = &entries[entry->le_prev_idx]; + struct lru_entry *next = &entries[entry->le_next_idx]; + + prev->le_next_idx = entry->le_next_idx; + next->le_prev_idx = entry->le_prev_idx; +} + +/** Internal API: Insert an entry in the lru list */ +static inline void +lrua_insert_entry(struct lru_entry *entries, struct lru_entry *entry, + uint32_t idx, uint32_t prev_idx, uint32_t next_idx) +{ + struct lru_entry *prev; + struct lru_entry *next; + + prev = &entries[prev_idx]; + next = &entries[next_idx]; + next->le_prev_idx = idx; + prev->le_next_idx = idx; + entry->le_prev_idx = prev_idx; + entry->le_next_idx = next_idx; +} + +/** Internal API: Make the entry the mru */ +static inline void +lrua_move_to_mru(struct lru_array *array, struct lru_entry *entry, uint32_t idx) +{ + if (array->la_mru == idx) { + /** Already the mru */ + return; + } + + if (array->la_lru == idx) + array->la_lru = entry->le_next_idx; + + /** First remove */ + lrua_remove_entry(&array->la_table[0], entry); + + /** Now add */ + lrua_insert_entry(&array->la_table[0], entry, idx, + array->la_mru, array->la_lru); + + array->la_mru = idx; +} + +/** Internal API to lookup entry from index */ +static inline struct lru_entry * +lrua_lookup_idx(struct lru_array *array, const uint32_t *idx) +{ + struct lru_entry *entry; + uint32_t tindex = *idx; + + if (tindex >= array->la_count) + return NULL; + + entry = &array->la_table[tindex]; + if (entry->le_record_idx == idx) { + lrua_move_to_mru(array, entry, tindex); + return entry; + } + + return NULL; +} + +/** Lookup an entry in the lru array. + * + * \param array[in] The lru array + * \param idx[in,out] Address of the record index. + * \param entryp[in,out] Valid only if function returns true. 
+ * + * \return true if the entry is in the array and set \p entryp accordingly + */ +static inline bool +lrua_lookup(struct lru_array *array, const uint32_t *idx, + void **entryp) +{ + struct lru_entry *entry; + + D_ASSERT(array != NULL); + + *entryp = NULL; + + entry = lrua_lookup_idx(array, idx); + if (entry == NULL) + return false; + + *entryp = entry->le_payload; + return true; +} + +/** Allocate a new entry lru array. Lookup should be called first and this + * should only be called if it returns false. This will modify idx. If + * called within a transaction and the value needs to persist, the old value + * should be logged before calling this function. + * + * \param array[in] The LRU array + * \param idx[in,out] Address of the entry index. + * \param evict_lru[in] True if LRU should be evicted + * + * \return Returns a pointer to the entry or NULL if evict_lru is false + * and the entry at the LRU is allocated + */ +static inline void * +lrua_alloc(struct lru_array *array, uint32_t *idx, bool evict_lru) +{ + struct lru_entry *new_entry; + + D_ASSERT(array != NULL); + + lrua_evict_lru(array, &new_entry, idx, evict_lru); + + if (new_entry == NULL) + return NULL; + + return new_entry->le_payload; +} + +/** If an entry is still in the array, evict it and invoke eviction callback. + * Move the evicted entry to the LRU and mark it as already evicted. + * + * \param array[in] Address of the LRU array. + * \param idx[in] Address of the entry index. 
+ */ +void +lrua_evict(struct lru_array *array, uint32_t *idx); + +/** Allocate an LRU array + * + * \param array[in,out] Pointer to LRU array + * \param nr_ent[in] Number of records in array + * \param rec_size[in] Size of each record + * \param cbs[in] Optional callbacks + * \param arg[in] Optional argument passed to all callbacks + * + * \return -DER_NOMEM Not enough memory available + * 0 Success + */ +int +lrua_array_alloc(struct lru_array **array, uint32_t nr_ent, + uint32_t record_size, const struct lru_callbacks *cbs, + void *arg); + + +/** Free an LRU array + * + * \param array[in] Pointer to LRU array + */ +void +lrua_array_free(struct lru_array *array); + +#endif /* __LRU_ARRAY__ */ diff --git a/src/vos/tests/vts_ts.c b/src/vos/tests/vts_ts.c index eaf1680f059..f65e4febb5d 100644 --- a/src/vos/tests/vts_ts.c +++ b/src/vos/tests/vts_ts.c @@ -34,11 +34,12 @@ #include #include +#define VOS_TS_SIZE (8 * 1024 * 1024) + #define NUM_EXTRA 4 struct ts_test_arg { - uint32_t *ta_records[VOS_TS_TYPE_COUNT]; + uint32_t ta_records[VOS_TS_TYPE_COUNT][VOS_TS_SIZE]; struct vos_ts_set *ta_ts_set; - uint32_t ta_real_records[VOS_TS_SIZE]; uint32_t ta_counts[VOS_TS_TYPE_COUNT]; uint32_t ta_extra_records[NUM_EXTRA]; }; @@ -169,7 +170,7 @@ run_positive_entry_test(struct ts_test_arg *ts_arg, uint32_t type) /** Now evict the extra records to reset the array for child tests */ for (idx = 0; idx < NUM_EXTRA; idx++) - vos_ts_evict(&ts_arg->ta_extra_records[idx]); + vos_ts_evict(&ts_arg->ta_extra_records[idx], type); /** evicting an entry should move it to lru */ vos_ts_set_reset(ts_arg->ta_ts_set, type, 0); @@ -177,7 +178,7 @@ run_positive_entry_test(struct ts_test_arg *ts_arg, uint32_t type) false, &same); assert_true(found); assert_int_equal(same->te_info->ti_type, type); - vos_ts_evict(&ts_arg->ta_records[type][20]); + vos_ts_evict(&ts_arg->ta_records[type][20], type); found = vos_ts_lookup(ts_arg->ta_ts_set, &ts_arg->ta_records[type][20], true, &entry); assert_false(found); @@ 
-221,7 +222,7 @@ ilog_test_ts_get(void **state) } for (type = VOS_TS_TYPE_AKEY;; type -= 2) { - vos_ts_evict(&ts_arg->ta_records[type][0]); + vos_ts_evict(&ts_arg->ta_records[type][0], type); found = vos_ts_lookup(ts_arg->ta_ts_set, &ts_arg->ta_records[type][0], true, &entry); @@ -261,9 +262,186 @@ alloc_ts_cache(void **state) return 0; } +struct index_record { + uint32_t idx; + uint32_t value; +}; + +#define LRU_ARRAY_SIZE 32 +#define NUM_INDEXES 128 +struct lru_arg { + struct lru_array *array; + struct index_record indexes[NUM_INDEXES]; +}; + +struct lru_record { + uint64_t magic1; + struct index_record *record; + uint32_t idx; + uint32_t custom; + uint64_t magic2; +}; + +#define MAGIC1 0xdeadbeef +#define MAGIC2 0xbaadf00d + +static void +on_entry_evict(void *payload, uint32_t idx, void *arg) +{ + struct lru_record *record = payload; + + record->record->value = 0xdeadbeef; +} + +static void +on_entry_init(void *payload, uint32_t idx, void *arg) +{ + struct lru_record *record = payload; + + record->idx = idx; + record->magic1 = MAGIC1; + record->magic2 = MAGIC2; +} + +static void +on_entry_fini(void *payload, uint32_t idx, void *arg) +{ + struct lru_record *record = payload; + + if (record->record) + record->record->value = 0xdeadbeef; +} + +static const struct lru_callbacks lru_cbs = { + .lru_on_evict = on_entry_evict, + .lru_on_init = on_entry_init, + .lru_on_fini = on_entry_fini, +}; + +static void +lru_array_test(void **state) +{ + struct lru_arg *ts_arg = *state; + struct lru_record *entry; + int i; + bool found; + int lru_idx; + + + for (i = 0; i < NUM_INDEXES; i++) { + found = lrua_lookup(ts_arg->array, &ts_arg->indexes[i].idx, + (void **)&entry); + assert_false(found); + } + + for (i = 0; i < NUM_INDEXES; i++) { + entry = lrua_alloc(ts_arg->array, &ts_arg->indexes[i].idx, + true); + assert_non_null(entry); + + entry->record = &ts_arg->indexes[i]; + ts_arg->indexes[i].value = i; + } + + for (i = NUM_INDEXES - 1; i >= 0; i--) { + found = 
lrua_lookup(ts_arg->array, &ts_arg->indexes[i].idx, + (void **)&entry); + if (found) { + assert_true(i >= (NUM_INDEXES - LRU_ARRAY_SIZE)); + assert_non_null(entry); + assert_true(entry->magic1 == MAGIC1); + assert_true(entry->magic2 == MAGIC2); + assert_true(i == ts_arg->indexes[i].value); + assert_true(entry->idx == ts_arg->indexes[i].idx); + } else { + assert_false(i >= (NUM_INDEXES - LRU_ARRAY_SIZE)); + assert_null(entry); + assert_true(ts_arg->indexes[i].value == 0xdeadbeef); + } + } + + lru_idx = NUM_INDEXES - 3; + found = lrua_lookup(ts_arg->array, + &ts_arg->indexes[lru_idx].idx, (void **)&entry); + assert_true(found); + assert_non_null(entry); + assert_true(entry->record->value == lru_idx); + + /* cache all but one new entry */ + for (i = 0; i < LRU_ARRAY_SIZE - 1; i++) { + found = lrua_lookup(ts_arg->array, &ts_arg->indexes[i].idx, + (void **)&entry); + assert_false(found); + entry = lrua_alloc(ts_arg->array, &ts_arg->indexes[i].idx, + true); + assert_non_null(entry); + + entry->record = &ts_arg->indexes[i]; + ts_arg->indexes[i].value = i; + + found = lrua_lookup(ts_arg->array, &ts_arg->indexes[i].idx, + (void *)&entry); + assert_non_null(entry); + assert_true(entry->magic1 == MAGIC1); + assert_true(entry->magic2 == MAGIC2); + assert_true(i == ts_arg->indexes[i].value); + assert_true(entry->idx == ts_arg->indexes[i].idx); + } + + /** lru_idx should still be there */ + found = lrua_lookup(ts_arg->array, + &ts_arg->indexes[lru_idx].idx, (void *)&entry); + assert_true(found); + assert_non_null(entry); + assert_true(entry->record->value == lru_idx); + + lrua_evict(ts_arg->array, &ts_arg->indexes[lru_idx].idx); + + found = lrua_lookup(ts_arg->array, + &ts_arg->indexes[lru_idx].idx, (void *)&entry); + assert_false(found); +} + +static int +init_lru_test(void **state) +{ + struct lru_arg *ts_arg; + int rc; + + D_ALLOC_PTR(ts_arg); + if (ts_arg == NULL) + return 1; + + rc = lrua_array_alloc(&ts_arg->array, LRU_ARRAY_SIZE, + sizeof(struct lru_record), &lru_cbs, + 
ts_arg); + + *state = ts_arg; + return rc; +} + +static int +finalize_lru_test(void **state) +{ + struct lru_arg *ts_arg = *state; + + if (ts_arg == NULL) + return 0; + + if (ts_arg->array == NULL) + return 0; + + lrua_array_free(ts_arg->array); + + return 0; +} + + static const struct CMUnitTest ts_tests[] = { { "VOS600.1: VOS timestamp allocation test", ilog_test_ts_get, alloc_ts_cache, NULL}, + { "VOS600.2: LRU array test", lru_array_test, init_lru_test, + finalize_lru_test}, }; static int @@ -272,7 +450,6 @@ ts_test_init(void **state) int i; struct vos_ts_table *ts_table; struct ts_test_arg *ts_arg; - uint32_t *cursor; int rc; D_ALLOC_PTR(ts_arg); @@ -285,12 +462,8 @@ ts_test_init(void **state) ts_table = vos_ts_table_get(); - cursor = &ts_arg->ta_real_records[0]; - for (i = 0; i < VOS_TS_TYPE_COUNT; i++) { + for (i = 0; i < VOS_TS_TYPE_COUNT; i++) ts_arg->ta_counts[i] = ts_table->tt_type_info[i].ti_count; - ts_arg->ta_records[i] = cursor; - cursor += ts_arg->ta_counts[i]; - } rc = vos_ts_set_allocate(&ts_arg->ta_ts_set, VOS_OF_USE_TIMESTAMPS, 1); if (rc != 0) { diff --git a/src/vos/vos_container.c b/src/vos/vos_container.c index 14128a986ee..48a21dd5af2 100644 --- a/src/vos/vos_container.c +++ b/src/vos/vos_container.c @@ -77,7 +77,7 @@ cont_df_rec_free(struct btr_instance *tins, struct btr_record *rec, void *args) return -DER_NONEXIST; cont_df = umem_off2ptr(&tins->ti_umm, rec->rec_off); - vos_ts_evict(&cont_df->cd_ts_idx); + vos_ts_evict(&cont_df->cd_ts_idx, VOS_TS_TYPE_CONT); return gc_add_item(tins->ti_priv, GC_CONT, rec->rec_off, 0); } diff --git a/src/vos/vos_dtx.c b/src/vos/vos_dtx.c index df96d9dd3d9..8072909d71e 100644 --- a/src/vos/vos_dtx.c +++ b/src/vos/vos_dtx.c @@ -539,8 +539,10 @@ vos_dtx_commit_one(struct vos_container *cont, struct dtx_id *dti, goto out; dtx_rec_release(cont, dae, false, &offset); - vos_dtx_del_cos(cont, &DAE_OID(dae), dti, DAE_DKEY_HASH(dae), + rc = vos_dtx_del_cos(cont, &DAE_OID(dae), dti, DAE_DKEY_HASH(dae), DAE_INTENT(dae) 
== DAOS_INTENT_PUNCH ? true : false); + if (rc != 0) + D_GOTO(out, rc); /* If dbtree_delete() failed, the @dae will be left in the active DTX * table until close the container. It is harmless but waste some DRAM. @@ -551,8 +553,9 @@ vos_dtx_commit_one(struct vos_container *cont, struct dtx_id *dti, umem_free(vos_cont2umm(cont), offset); out: - D_DEBUG(DB_IO, "Commit the DTX "DF_DTI": rc = "DF_RC"\n", - DP_DTI(dti), DP_RC(rc)); + D_CDEBUG(rc != 0 && rc != -DER_NONEXIST, DLOG_ERR, DB_IO, + "Commit the DTX "DF_DTI": rc = "DF_RC"\n", + DP_DTI(dti), DP_RC(rc)); if (rc != 0) { if (dce != NULL) D_FREE_PTR(dce); @@ -1250,13 +1253,16 @@ vos_dtx_commit_internal(struct vos_container *cont, struct dtx_id *dtis, dce_df = &dbd->dbd_commmitted_data[dbd->dbd_count]; } - for (i = 0, j = 0; i < slots; i++, cur++) { + for (i = 0, j = 0; i < slots && rc1 == 0; i++, cur++) { struct vos_dtx_cmt_ent *dce = NULL; rc = vos_dtx_commit_one(cont, &dtis[cur], epoch, &dce); if (rc == 0 && dce != NULL) committed++; + if (rc == -DER_NONEXIST) + rc = 0; + if (rc1 == 0) rc1 = rc; @@ -1282,7 +1288,7 @@ vos_dtx_commit_internal(struct vos_container *cont, struct dtx_id *dtis, if (j > 0) dbd->dbd_count += j; - if (count == 0) + if (count == 0 || rc1 != 0) return committed > 0 ? 
0 : rc1; if (j < slots) { @@ -1342,13 +1348,16 @@ vos_dtx_commit_internal(struct vos_container *cont, struct dtx_id *dtis, cont_df->cd_dtx_committed_tail = dbd_off; - for (i = 0, j = 0; i < count; i++, cur++) { + for (i = 0, j = 0; i < count && rc1 == 0; i++, cur++) { struct vos_dtx_cmt_ent *dce = NULL; rc = vos_dtx_commit_one(cont, &dtis[cur], epoch, &dce); if (rc == 0 && dce != NULL) committed++; + if (rc == -DER_NONEXIST) + rc = 0; + if (rc1 == 0) rc1 = rc; diff --git a/src/vos/vos_dtx_cos.c b/src/vos/vos_dtx_cos.c index 65c2056085c..4750c1d0f7d 100644 --- a/src/vos/vos_dtx_cos.c +++ b/src/vos/vos_dtx_cos.c @@ -545,13 +545,13 @@ vos_dtx_fetch_committable(daos_handle_t coh, uint32_t max_cnt, return min(count, i); } -void +int vos_dtx_del_cos(struct vos_container *cont, daos_unit_oid_t *oid, struct dtx_id *xid, uint64_t dkey_hash, bool punch) { struct dtx_cos_key key; - d_iov_t kiov; - d_iov_t riov; + d_iov_t kiov; + d_iov_t riov; struct dtx_cos_rec *dcr; struct dtx_cos_rec_child *dcrc; d_list_t *head; @@ -563,8 +563,15 @@ vos_dtx_del_cos(struct vos_container *cont, daos_unit_oid_t *oid, d_iov_set(&riov, NULL, 0); rc = dbtree_lookup(cont->vc_dtx_cos_hdl, &kiov, &riov); - if (rc != 0) - return; + if (rc != 0) { + if (rc == -DER_NONEXIST) + return 0; + + D_ERROR("Fail to remove "DF_DTI" from CoS cache: %d\n", + DP_DTI(xid), rc); + + return rc; + } dcr = (struct dtx_cos_rec *)riov.iov_buf; if (punch) @@ -576,11 +583,6 @@ vos_dtx_del_cos(struct vos_container *cont, daos_unit_oid_t *oid, if (memcmp(&dcrc->dcrc_dti, xid, sizeof(*xid)) != 0) continue; - D_DEBUG(DB_IO, "Remove DTX "DF_DTI" from CoS cache, " - "key %llu, intent %s, has ilog entry\n", - DP_DTI(&dcrc->dcrc_dti), (unsigned long long)dkey_hash, - punch ? 
"Punch" : "Update"); - d_list_del(&dcrc->dcrc_committable); d_list_del(&dcrc->dcrc_link); D_FREE_PTR(dcrc); @@ -593,24 +595,25 @@ vos_dtx_del_cos(struct vos_container *cont, daos_unit_oid_t *oid, if (dcr->dcr_punch_count == 0 && dcr->dcr_update_count == 0 && dcr->dcr_ilog_count == 0) - dbtree_delete(cont->vc_dtx_cos_hdl, BTR_PROBE_EQ, - &kiov, NULL); + rc = dbtree_delete(cont->vc_dtx_cos_hdl, BTR_PROBE_EQ, + &kiov, NULL); + + D_CDEBUG(rc != 0, DLOG_ERR, DB_IO, "Remove DTX "DF_DTI" from " + "CoS cache, key %llu, intent %s, has ilog entry: %d\n", + DP_DTI(xid), (unsigned long long)dkey_hash, + punch ? "Punch" : "Update", rc); - return; + return rc; } if (punch) - return; + return 0; /* For UPDATE DTX, the DTX entry can be in {update,ilog}_list */ d_list_for_each_entry(dcrc, &dcr->dcr_update_list, dcrc_link) { if (memcmp(&dcrc->dcrc_dti, xid, sizeof(*xid)) != 0) continue; - D_DEBUG(DB_IO, "Remove DTX "DF_DTI" from CoS cache, " - "key %llu, intent Update, has not ilog entry\n", - DP_DTI(&dcrc->dcrc_dti), (unsigned long long)dkey_hash); - d_list_del(&dcrc->dcrc_committable); d_list_del(&dcrc->dcrc_link); D_FREE_PTR(dcrc); @@ -620,11 +623,18 @@ vos_dtx_del_cos(struct vos_container *cont, daos_unit_oid_t *oid, if (dcr->dcr_punch_count == 0 && dcr->dcr_update_count == 0 && dcr->dcr_ilog_count == 0) - dbtree_delete(cont->vc_dtx_cos_hdl, BTR_PROBE_EQ, - &kiov, NULL); + rc = dbtree_delete(cont->vc_dtx_cos_hdl, BTR_PROBE_EQ, + &kiov, NULL); + + D_CDEBUG(rc != 0, DLOG_ERR, DB_IO, "Remove DTX "DF_DTI" from " + "CoS cache, key %llu, intent Update, has not ilog " + "entry: %d\n", + DP_DTI(xid), (unsigned long long)dkey_hash, rc); - return; + break; } + + return rc; } uint64_t diff --git a/src/vos/vos_ilog.c b/src/vos/vos_ilog.c index 41376b71441..4ca17f24332 100644 --- a/src/vos/vos_ilog.c +++ b/src/vos/vos_ilog.c @@ -520,11 +520,11 @@ vos_ilog_ts_mark(struct vos_ts_set *ts_set, struct ilog_df *ilog) } void -vos_ilog_ts_evict(struct ilog_df *ilog) +vos_ilog_ts_evict(struct ilog_df 
*ilog, uint32_t type) { uint32_t *idx; idx = ilog_ts_idx_get(ilog); - return vos_ts_evict(idx); + return vos_ts_evict(idx, type); } diff --git a/src/vos/vos_ilog.h b/src/vos/vos_ilog.h index 9fd0bdf8a46..60e8262cb09 100644 --- a/src/vos/vos_ilog.h +++ b/src/vos/vos_ilog.h @@ -311,8 +311,9 @@ vos_ilog_ts_mark(struct vos_ts_set *ts_set, struct ilog_df *ilog); /** Evict the cached timestamp entry, if present * * \param ilog[in] The incarnation log + * \param type[in] The timestamp type */ void -vos_ilog_ts_evict(struct ilog_df *ilog); +vos_ilog_ts_evict(struct ilog_df *ilog, uint32_t type); #endif /* __VOS_ILOG_H__ */ diff --git a/src/vos/vos_internal.h b/src/vos/vos_internal.h index a815d0c112a..c35aaceee5d 100644 --- a/src/vos/vos_internal.h +++ b/src/vos/vos_internal.h @@ -386,8 +386,11 @@ vos_dtx_cos_register(void); * \param xid [IN] Pointer to the DTX identifier. * \param dkey_hash [IN] The hashed dkey. * \param punch [IN] For punch DTX or not. + * + * \return Zero on success. + * \return Other negative value if error. */ -void +int vos_dtx_del_cos(struct vos_container *cont, daos_unit_oid_t *oid, struct dtx_id *xid, uint64_t dkey_hash, bool punch); diff --git a/src/vos/vos_obj_index.c b/src/vos/vos_obj_index.c index 1db6ec994ab..20d08c8c405 100644 --- a/src/vos/vos_obj_index.c +++ b/src/vos/vos_obj_index.c @@ -145,6 +145,8 @@ oi_rec_free(struct btr_instance *tins, struct btr_record *rec, void *args) return rc; } + vos_ilog_ts_evict(&obj->vo_ilog, VOS_TS_TYPE_OBJ); + D_ASSERT(tins->ti_priv); return gc_add_item((struct vos_pool *)tins->ti_priv, GC_OBJ, rec->rec_off, 0); diff --git a/src/vos/vos_tree.c b/src/vos/vos_tree.c index 84d90e10bf7..29faabb0312 100644 --- a/src/vos/vos_tree.c +++ b/src/vos/vos_tree.c @@ -361,6 +361,9 @@ ktr_rec_free(struct btr_instance *tins, struct btr_record *rec, void *args) if (rc != 0) return rc; + vos_ilog_ts_evict(&krec->kr_ilog, (krec->kr_bmap & KREC_BF_DKEY) ? 
+ VOS_TS_TYPE_DKEY : VOS_TS_TYPE_AKEY); + D_ASSERT(tins->ti_priv); gc = (krec->kr_bmap & KREC_BF_DKEY) ? GC_DKEY : GC_AKEY; return gc_add_item((struct vos_pool *)tins->ti_priv, gc, diff --git a/src/vos/vos_ts.c b/src/vos/vos_ts.c index 9668f1e9a74..9a09501520c 100644 --- a/src/vos/vos_ts.c +++ b/src/vos/vos_ts.c @@ -30,14 +30,14 @@ #include "vos_internal.h" -#define DEFINE_TS_STR(type, desc, count, child_count) desc, desc "_nochild", +#define DEFINE_TS_STR(type, desc, count) desc, /** Strings corresponding to timestamp types */ static const char * const type_strs[] = { D_FOREACH_TS_TYPE(DEFINE_TS_STR) }; -#define DEFINE_TS_COUNT(type, desc, count, child_count) count, child_count, +#define DEFINE_TS_COUNT(type, desc, count) count, static const uint32_t type_counts[] = { D_FOREACH_TS_TYPE(DEFINE_TS_COUNT) }; @@ -46,19 +46,121 @@ static const uint32_t type_counts[] = { #define DKEY_MISS_SIZE (1 << 5) #define AKEY_MISS_SIZE (1 << 4) +#define TS_TRACE(action, entry, idx, type) \ + D_DEBUG(DB_TRACE, "%s %s at idx %d(%p), read.hi="DF_U64 \ + " read.lo="DF_U64" write="DF_U64"\n", action, \ + type_strs[type], idx, (entry)->te_record_ptr, \ + (entry)->te_ts_rh, (entry)->te_ts_rl, (entry)->te_ts_w) + +/** This probably needs more thought */ +static bool +ts_update_on_evict(struct vos_ts_table *ts_table, struct vos_ts_entry *entry) +{ + struct vos_ts_entry *parent = NULL; + struct vos_ts_entry *other = NULL; + struct vos_ts_info *info = entry->te_info; + struct vos_ts_info *parent_info; + struct vos_ts_info *neg_info = NULL; + uint32_t *idx; + + if (entry->te_record_ptr == NULL) + return false; + + if (entry->te_parent_ptr != NULL) { + if (info->ti_type & 1) { /* negative entry */ + parent_info = info - 1; + } else { + parent_info = info - 2; + neg_info = info - 1; + } + lrua_lookup(parent_info->ti_array, entry->te_parent_ptr, + (void **)&parent); + if (neg_info == NULL) { + other = parent; + } else if (parent != NULL) { + idx = &parent->te_miss_idx[entry->te_hash_idx]; + 
lrua_lookup(neg_info->ti_array, idx, (void **)&other); + } + } + + if (other == NULL) { + ts_table->tt_ts_rl = MAX(ts_table->tt_ts_rl, entry->te_ts_rl); + ts_table->tt_ts_rh = MAX(ts_table->tt_ts_rh, entry->te_ts_rh); + ts_table->tt_ts_w = MAX(ts_table->tt_ts_w, entry->te_ts_w); + return true; + } + + other->te_ts_rl = MAX(other->te_ts_rl, entry->te_ts_rl); + other->te_ts_rh = MAX(other->te_ts_rh, entry->te_ts_rh); + other->te_ts_w = MAX(other->te_ts_w, entry->te_ts_w); + + return true; +} + +static inline void +evict_children(struct vos_ts_info *info, struct vos_ts_entry *entry) +{ + int i; + uint32_t *idx; + uint32_t cache_num; + + info = entry->te_info; + + if ((info->ti_type == VOS_TS_TYPE_AKEY) || (info->ti_type & 1) != 0) + return; + + cache_num = info->ti_cache_mask + 1; + info++; + for (i = 0; i < cache_num; i++) { + /* Also evict the children, if present */ + idx = &entry->te_miss_idx[i]; + lrua_evict(info->ti_array, idx); + } +} + + +static void evict_entry(void *payload, uint32_t idx, void *arg) +{ + struct vos_ts_info *info = arg; + struct vos_ts_entry *entry = payload; + + evict_children(info, entry); + + if (ts_update_on_evict(info->ti_table, entry)) { + TS_TRACE("Evicted", entry, idx, info->ti_type); + entry->te_record_ptr = NULL; + } +} + +static void init_entry(void *payload, uint32_t idx, void *arg) +{ + struct vos_ts_info *info = arg; + struct vos_ts_entry *entry = payload; + uint32_t count; + + entry->te_info = info; + if (info->ti_misses) { + D_ASSERT((info->ti_type & 1) == 0 && + info->ti_type != VOS_TS_TYPE_AKEY && + info->ti_cache_mask != 0); + count = info->ti_cache_mask + 1; + entry->te_miss_idx = &info->ti_misses[idx * count]; + } +} + +static const struct lru_callbacks lru_cbs = { + .lru_on_evict = evict_entry, + .lru_on_init = init_entry, +}; + int vos_ts_table_alloc(struct vos_ts_table **ts_tablep) { struct vos_ts_table *ts_table; struct vos_ts_info *info; - struct vos_ts_entry *current; - uint32_t sofar = 0; - uint32_t cur_idx; - 
uint32_t next_idx; - uint32_t prev_idx; - uint32_t i, count, offset; + int rc; + uint32_t i; uint32_t miss_size; - uint32_t *misses; uint32_t *miss_cursor; *ts_tablep = NULL; @@ -67,38 +169,25 @@ vos_ts_table_alloc(struct vos_ts_table **ts_tablep) if (ts_table == NULL) return -DER_NOMEM; - D_ALLOC_ARRAY(misses, (type_counts[VOS_TS_TYPE_CONT] * OBJ_MISS_SIZE) + - (type_counts[VOS_TS_TYPE_OBJ] * DKEY_MISS_SIZE) + - (type_counts[VOS_TS_TYPE_DKEY] * AKEY_MISS_SIZE)); - if (misses == NULL) { - D_FREE(ts_table); - return -DER_NOMEM; + D_ALLOC_ARRAY(ts_table->tt_misses, + (type_counts[VOS_TS_TYPE_CONT] * OBJ_MISS_SIZE) + + (type_counts[VOS_TS_TYPE_OBJ] * DKEY_MISS_SIZE) + + (type_counts[VOS_TS_TYPE_DKEY] * AKEY_MISS_SIZE)); + if (ts_table->tt_misses == NULL) { + rc = -DER_NOMEM; + goto free_table; } ts_table->tt_ts_rl = vos_start_epoch; ts_table->tt_ts_rh = vos_start_epoch; ts_table->tt_ts_w = vos_start_epoch; - miss_cursor = misses; - cur_idx = 0; + miss_cursor = ts_table->tt_misses; for (i = 0; i < VOS_TS_TYPE_COUNT; i++) { info = &ts_table->tt_type_info[i]; - count = type_counts[i]; - - if (count == 0) { - D_ASSERT(i == VOS_TS_TYPE_AKEY); - count = VOS_TS_SIZE - sofar; - /** More akeys than missing akeys */ - D_ASSERT(count > type_counts[VOS_TS_TYPE_DKEY_CHILD]); - /** Make sure it doesn't overflow */ - D_ASSERT(count < VOS_TS_SIZE); - } else { - sofar += count; - } - info->ti_count = count; info->ti_type = i; - - offset = cur_idx; + info->ti_count = type_counts[i]; + info->ti_table = ts_table; switch (i) { case VOS_TS_TYPE_CONT: miss_size = OBJ_MISS_SIZE; @@ -114,138 +203,46 @@ vos_ts_table_alloc(struct vos_ts_table **ts_tablep) miss_size = 0; break; } - - if (miss_size != 0) + if (miss_size) { info->ti_cache_mask = miss_size - 1; - info->ti_lru = cur_idx; - prev_idx = info->ti_mru = offset + count - 1; - while (cur_idx < (offset + count)) { - next_idx = offset + ((cur_idx + 1 - offset) % count); - current = &ts_table->tt_table[cur_idx]; - current->te_info = info; - 
current->te_next_idx = next_idx; - current->te_prev_idx = prev_idx; - prev_idx = cur_idx; - cur_idx++; - if (miss_size == 0) - continue; - current->te_miss_idx = miss_cursor; - miss_cursor += miss_size; + info->ti_misses = miss_cursor; + miss_cursor += info->ti_count * miss_size; } + + rc = lrua_array_alloc(&info->ti_array, info->ti_count, + sizeof(struct vos_ts_entry), &lru_cbs, + info); + if (rc != 0) + goto cleanup; } *ts_tablep = ts_table; return 0; -} -void -vos_ts_table_free(struct vos_ts_table **ts_tablep) -{ - struct vos_ts_table *ts_table = *ts_tablep; - - /** entry 0 points to start of allocated space */ - D_FREE(ts_table->tt_table[0].te_miss_idx); +cleanup: + for (i = 0; i < VOS_TS_TYPE_COUNT; i++) + lrua_array_free(ts_table->tt_type_info[i].ti_array); + D_FREE(ts_table->tt_misses); +free_table: D_FREE(ts_table); - *ts_tablep = NULL; -} - -/** This probably needs more thought */ -static bool -ts_update_on_evict(struct vos_ts_table *ts_table, struct vos_ts_entry *entry) -{ - struct vos_ts_entry *parent = NULL; - struct vos_ts_entry *other = NULL; - struct vos_ts_info *info = entry->te_info; - uint32_t *idx; - - if (entry->te_record_ptr == NULL) - return false; - - if (entry->te_parent_ptr != NULL) { - parent = vos_ts_lookup_idx(ts_table, entry->te_parent_ptr); - if (info->ti_type & 1) { /* negative entry */ - other = parent; - } else if (parent != NULL) { - idx = &parent->te_miss_idx[entry->te_hash_idx]; - other = vos_ts_lookup_idx(ts_table, idx); - } - } - - if (other == NULL) { - ts_table->tt_ts_rl = MAX(ts_table->tt_ts_rl, entry->te_ts_rl); - ts_table->tt_ts_rh = MAX(ts_table->tt_ts_rh, entry->te_ts_rh); - ts_table->tt_ts_w = MAX(ts_table->tt_ts_w, entry->te_ts_w); - return true; - } - - other->te_ts_rl = MAX(other->te_ts_rl, entry->te_ts_rl); - other->te_ts_rh = MAX(other->te_ts_rh, entry->te_ts_rh); - other->te_ts_w = MAX(other->te_ts_w, entry->te_ts_w); - - return true; -} - -#define TS_TRACE(action, entry, idx, type) \ - D_DEBUG(DB_TRACE, "%s %s at 
idx %d(%p), read.hi="DF_U64 \ - " read.lo="DF_U64" write="DF_U64"\n", action, \ - type_strs[type], idx, (entry)->te_record_ptr, \ - (entry)->te_ts_rh, (entry)->te_ts_rl, (entry)->te_ts_w) - -static inline void -evict_one(struct vos_ts_table *ts_table, struct vos_ts_entry *entry, - uint32_t idx, struct vos_ts_info *info, bool removed) -{ - if (ts_update_on_evict(ts_table, entry)) { - TS_TRACE("Evicted", entry, idx, info->ti_type); - entry->te_record_ptr = NULL; - } - - if (removed) - return; - - if (info->ti_mru == idx) - info->ti_mru = entry->te_prev_idx; - - if (info->ti_lru == idx) - return; - - /** Remove the entry from it's current location */ - remove_ts_entry(&ts_table->tt_table[0], entry); - - /** insert the entry at the LRU */ - insert_ts_entry(&ts_table->tt_table[0], entry, idx, info->ti_mru, - info->ti_lru); - - info->ti_lru = idx; + return rc; } -static inline void -evict_children(struct vos_ts_table *ts_table, struct vos_ts_info *info, - struct vos_ts_entry *entry) +void +vos_ts_table_free(struct vos_ts_table **ts_tablep) { - struct vos_ts_entry *child; + struct vos_ts_table *ts_table = *ts_tablep; int i; - uint32_t idx; - uint32_t cache_num; - info = entry->te_info; - - if ((info->ti_type == VOS_TS_TYPE_AKEY) || (info->ti_type & 1) != 0) - return; + for (i = 0; i < VOS_TS_TYPE_COUNT; i++) + lrua_array_free(ts_table->tt_type_info[i].ti_array); - cache_num = info->ti_cache_mask + 1; - info++; - for (i = 0; i < cache_num; i++) { - /* Also evict the children, if present */ - idx = entry->te_miss_idx[i] & VOS_TS_MASK; - child = &ts_table->tt_table[idx]; - if (child->te_record_ptr != &entry->te_miss_idx[i]) - continue; + D_FREE(ts_table->tt_misses); + D_FREE(ts_table); - evict_one(ts_table, child, idx, info, false); - } + *ts_tablep = NULL; } void @@ -258,16 +255,8 @@ vos_ts_evict_lru(struct vos_ts_table *ts_table, struct vos_ts_entry *parent, struct vos_ts_info *info = &ts_table->tt_type_info[type]; uint32_t *neg_idx; - /** Ok, grab and evict the LRU */ - 
*idx = info->ti_lru; - entry = &ts_table->tt_table[*idx]; - info->ti_lru = entry->te_next_idx; - info->ti_mru = *idx; - - if (entry->te_record_ptr != NULL) { - evict_children(ts_table, info, entry); - evict_one(ts_table, entry, *idx, info, true); - } + entry = lrua_alloc(ts_table->tt_type_info[type].ti_array, idx, + true); if (parent == NULL) { /** Use global timestamps for the type to initialize it */ @@ -279,7 +268,8 @@ vos_ts_evict_lru(struct vos_ts_table *ts_table, struct vos_ts_entry *parent, entry->te_parent_ptr = parent->te_record_ptr; if ((type & 1) == 0) { /* positive entry */ neg_idx = &parent->te_miss_idx[hash_idx]; - ts_source = vos_ts_lookup_idx(ts_table, neg_idx); + lrua_lookup(parent->te_info->ti_array, neg_idx, + (void **)&ts_source); } if (ts_source == NULL) /* for negative and uncached entries */ ts_source = parent; @@ -302,17 +292,6 @@ vos_ts_evict_lru(struct vos_ts_table *ts_table, struct vos_ts_entry *parent, *entryp = entry; } -void -vos_ts_evict_entry(struct vos_ts_table *ts_table, struct vos_ts_entry *entry, - uint32_t idx) -{ - struct vos_ts_info *info = entry->te_info; - - evict_children(ts_table, info, entry); - - evict_one(ts_table, entry, idx, info, false); -} - int vos_ts_set_allocate(struct vos_ts_set **ts_set, uint64_t flags, uint32_t akey_nr) diff --git a/src/vos/vos_ts.h b/src/vos/vos_ts.h index 808f700c3ee..8af6ff553e8 100644 --- a/src/vos/vos_ts.h +++ b/src/vos/vos_ts.h @@ -30,13 +30,18 @@ #ifndef __VOS_TS__ #define __VOS_TS__ +#include #include +struct vos_ts_table; + struct vos_ts_info { - /** Least recently accessed index */ - uint32_t ti_lru; - /** Most recently accessed index */ - uint32_t ti_mru; + /** The LRU array */ + struct lru_array *ti_array; + /** Back pointer to table */ + struct vos_ts_table *ti_table; + /** Miss indexes for the type */ + uint32_t *ti_misses; /** Type identifier */ uint32_t ti_type; /** mask for hash of negative entries */ @@ -47,8 +52,8 @@ struct vos_ts_info { struct vos_ts_entry { struct 
vos_ts_info *te_info; - /** Uniquely identifies the record */ - void *te_record_ptr; + /** Key for current occupant */ + uint32_t *te_record_ptr; /** Uniquely identifies the parent record */ uint32_t *te_parent_ptr; /** negative entry cache */ @@ -69,10 +74,6 @@ struct vos_ts_entry { uuid_t te_tx_rh; /** write tx */ uuid_t te_tx_w; - /** Next most recently used */ - uint32_t te_next_idx; - /** Previous most recently used */ - uint32_t te_prev_idx; /** Hash index in parent */ uint32_t te_hash_idx; }; @@ -98,24 +99,22 @@ struct vos_ts_set { struct vos_ts_set_entry ts_entries[0]; }; -/** Table will be per xstream */ -#define VOS_TS_BITS 23 -#define VOS_TS_SIZE (1 << VOS_TS_BITS) -#define VOS_TS_MASK (VOS_TS_SIZE - 1) - -/** Timestamp types */ +/** Timestamp types (should all be powers of 2) */ #define D_FOREACH_TS_TYPE(ACTION) \ - ACTION(VOS_TS_TYPE_CONT, "container", 1024, 32 * 1024) \ - ACTION(VOS_TS_TYPE_OBJ, "object", 96 * 1024, 128 * 1024) \ - ACTION(VOS_TS_TYPE_DKEY, "dkey", 896 * 1024, 1024 * 1024) \ - ACTION(VOS_TS_TYPE_AKEY, "akey", 0, 0) \ + ACTION(VOS_TS_TYPE_CONT, "container", 1024) \ + ACTION(VOS_TS_TYPE_OBJ_MISS, "object miss", 32 * 1024) \ + ACTION(VOS_TS_TYPE_OBJ, "object", 64 * 1024) \ + ACTION(VOS_TS_TYPE_DKEY_MISS, "dkey miss", 128 * 1024) \ + ACTION(VOS_TS_TYPE_DKEY, "dkey", 512 * 1024) \ + ACTION(VOS_TS_TYPE_AKEY_MISS, "akey miss", 1024 * 1024) \ + ACTION(VOS_TS_TYPE_AKEY, "akey", 4 * 1024 * 1024) -#define DEFINE_TS_TYPE(type, desc, count, child_count) type, type##_CHILD, +#define DEFINE_TS_TYPE(type, desc, count) type, enum { D_FOREACH_TS_TYPE(DEFINE_TS_TYPE) /** Number of timestamp types */ - VOS_TS_TYPE_COUNT = VOS_TS_TYPE_AKEY_CHILD, + VOS_TS_TYPE_COUNT, }; struct vos_ts_table { @@ -125,79 +124,12 @@ struct vos_ts_table { daos_epoch_t tt_ts_rh; /** Global write timestamp for type */ daos_epoch_t tt_ts_w; + /** Miss index table */ + uint32_t *tt_misses; /** Timestamp table pointers for a type */ struct vos_ts_info 
tt_type_info[VOS_TS_TYPE_COUNT]; - /** The table entries */ - struct vos_ts_entry tt_table[VOS_TS_SIZE]; }; -/** Internal API: Evict the LRU, move it to MRU, update relevant time stamps, - * and return the index - */ -void -vos_ts_evict_lru(struct vos_ts_table *ts_table, struct vos_ts_entry *parent, - struct vos_ts_entry **entryp, uint32_t *idx, uint32_t hash_idx, - uint32_t type); - -/** Internal API: Evict selected entry from the cache, update global - * timestamps - */ -void -vos_ts_evict_entry(struct vos_ts_table *ts_table, struct vos_ts_entry *entry, - uint32_t idx); - -/** Internal API: Remove an entry from the lru list */ -static inline void -remove_ts_entry(struct vos_ts_entry *entries, struct vos_ts_entry *entry) -{ - struct vos_ts_entry *prev = &entries[entry->te_prev_idx]; - struct vos_ts_entry *next = &entries[entry->te_next_idx]; - - prev->te_next_idx = entry->te_next_idx; - next->te_prev_idx = entry->te_prev_idx; -} - -/** Internal API: Insert an entry in the lru list */ -static inline void -insert_ts_entry(struct vos_ts_entry *entries, struct vos_ts_entry *entry, - uint32_t idx, uint32_t prev_idx, uint32_t next_idx) -{ - struct vos_ts_entry *prev; - struct vos_ts_entry *next; - - prev = &entries[prev_idx]; - next = &entries[next_idx]; - next->te_prev_idx = idx; - prev->te_next_idx = idx; - entry->te_prev_idx = prev_idx; - entry->te_next_idx = next_idx; -} - -/** Internal API: Make the entry the mru */ -static inline void -move_lru(struct vos_ts_table *ts_table, struct vos_ts_entry *entry, - uint32_t idx) -{ - struct vos_ts_info *info = entry->te_info; - - if (info->ti_mru == idx) { - /** Already the mru */ - return; - } - - if (info->ti_lru == idx) - info->ti_lru = entry->te_next_idx; - - /** First remove */ - remove_ts_entry(&ts_table->tt_table[0], entry); - - /** Now add */ - insert_ts_entry(&ts_table->tt_table[0], entry, idx, info->ti_mru, - info->ti_lru); - - info->ti_mru = idx; -} - /** Internal API: Grab the parent entry from the set */ static 
inline struct vos_ts_entry * ts_set_get_parent(struct vos_ts_set *ts_set) @@ -218,22 +150,6 @@ ts_set_get_parent(struct vos_ts_set *ts_set) } -/** Internal API to lookup entry from index */ -static inline struct vos_ts_entry * -vos_ts_lookup_idx(struct vos_ts_table *ts_table, uint32_t *idx) -{ - struct vos_ts_entry *entry; - uint32_t tindex = *idx & VOS_TS_MASK; - - entry = &ts_table->tt_table[tindex]; - if (entry->te_record_ptr == idx) { - move_lru(ts_table, entry, tindex); - return entry; - } - - return NULL; -} - /** Reset the index in the set so an entry can be replaced * * \param ts_set[in] The timestamp set @@ -255,6 +171,30 @@ vos_ts_set_reset(struct vos_ts_set *ts_set, uint32_t type, uint32_t akey_nr) ts_set->ts_init_count = idx; } +static inline bool +vos_ts_lookup_internal(struct vos_ts_set *ts_set, uint32_t type, uint32_t *idx, + struct vos_ts_entry **entryp) +{ + struct vos_ts_table *ts_table = vos_ts_table_get(); + struct vos_ts_info *info = &ts_table->tt_type_info[type]; + void *entry; + struct vos_ts_set_entry set_entry = {0}; + bool found; + + ts_table = vos_ts_table_get(); + + found = lrua_lookup(info->ti_array, idx, &entry); + if (found) { + D_ASSERT(ts_set->ts_set_size != ts_set->ts_init_count); + set_entry.se_entry = entry; + ts_set->ts_entries[ts_set->ts_init_count++] = set_entry; + *entryp = entry; + return true; + } + + return false; +} + /** Lookup an entry in the timestamp cache and save it to the set. 
* * \param ts_set[in] The timestamp set @@ -269,9 +209,7 @@ static inline bool vos_ts_lookup(struct vos_ts_set *ts_set, uint32_t *idx, bool reset, struct vos_ts_entry **entryp) { - struct vos_ts_table *ts_table = vos_ts_table_get(); - struct vos_ts_entry *entry; - struct vos_ts_set_entry set_entry = {0}; + uint32_t type; *entryp = NULL; @@ -281,20 +219,17 @@ vos_ts_lookup(struct vos_ts_set *ts_set, uint32_t *idx, bool reset, if (reset) ts_set->ts_init_count--; - ts_table = vos_ts_table_get(); + type = MIN(ts_set->ts_init_count * 2, VOS_TS_TYPE_AKEY); - entry = vos_ts_lookup_idx(ts_table, idx); - if (entry != NULL) { - D_ASSERT(ts_set->ts_set_size != ts_set->ts_init_count); - set_entry.se_entry = entry; - ts_set->ts_entries[ts_set->ts_init_count++] = set_entry; - *entryp = entry; - return true; - } - - return false; + return vos_ts_lookup_internal(ts_set, type, idx, entryp); } +/** Internal function to evict LRU and initialize an entry */ +void +vos_ts_evict_lru(struct vos_ts_table *ts_table, struct vos_ts_entry *parent, + struct vos_ts_entry **new_entry, uint32_t *idx, + uint32_t hash_idx, uint32_t new_type); + /** Allocate a new entry in the set. Lookup should be called first and this * should only be called if it returns false. 
* @@ -319,6 +254,7 @@ vos_ts_alloc(struct vos_ts_set *ts_set, uint32_t *idx, uint64_t hash) if (ts_set == NULL) return NULL; + ts_table = vos_ts_table_get(); parent = ts_set_get_parent(ts_set); @@ -444,8 +380,6 @@ vos_ts_get_negative(struct vos_ts_set *ts_set, uint64_t hash, bool reset) D_ASSERT(parent != NULL); - ts_table = vos_ts_table_get(); - info = parent->te_info; if (info->ti_type & 1) { /** Parent is a negative entry, just reuse it @@ -455,12 +389,18 @@ vos_ts_get_negative(struct vos_ts_set *ts_set, uint64_t hash, bool reset) goto add_to_set; } + ts_table = vos_ts_table_get(); + idx = hash & info->ti_cache_mask; - if (vos_ts_lookup(ts_set, &parent->te_miss_idx[idx], false, &neg_entry)) + if (vos_ts_lookup_internal(ts_set, info->ti_type + 1, + &parent->te_miss_idx[idx], &neg_entry)) { + D_ASSERT(idx == neg_entry->te_hash_idx); goto out; + } vos_ts_evict_lru(ts_table, parent, &neg_entry, &parent->te_miss_idx[idx], idx, info->ti_type + 1); + D_ASSERT(idx == neg_entry->te_hash_idx); add_to_set: set_entry.se_entry = neg_entry; ts_set->ts_entries[ts_set->ts_init_count++] = set_entry; @@ -478,17 +418,11 @@ vos_ts_get_negative(struct vos_ts_set *ts_set, uint64_t hash, bool reset) * \param type[in] Type of the object */ static inline void -vos_ts_evict(uint32_t *idx) +vos_ts_evict(uint32_t *idx, uint32_t type) { struct vos_ts_table *ts_table = vos_ts_table_get(); - struct vos_ts_entry *entry; - uint32_t tindex = *idx & VOS_TS_MASK; - - entry = &ts_table->tt_table[tindex]; - if (entry->te_record_ptr != idx) - return; - vos_ts_evict_entry(ts_table, entry, *idx); + lrua_evict(ts_table->tt_type_info[type].ti_array, idx); } /** Allocate thread local timestamp cache. 
Set the initial global times diff --git a/utils/build.config b/utils/build.config index ab0e33df8a9..3f4c89dcc9d 100644 --- a/utils/build.config +++ b/utils/build.config @@ -5,7 +5,7 @@ component=daos ARGOBOTS = v1.0 PMDK = 1.8 ISAL = v2.26.0 -SPDK = v19.04.1 +SPDK = v20.01.1 FUSE = 7bf25b6987d84c816aebd5325b95cfa0d311b1e6 FIO = e3ccbdd5f93d33162a93000586461ac6bba5a7d3 OFI = 62f6c937601776dac8a1f97c8bb1b1a6acfbc3c0 diff --git a/utils/docker/Dockerfile.centos.7 b/utils/docker/Dockerfile.centos.7 index 009afacf0f7..6d3d11ffbaf 100644 --- a/utils/docker/Dockerfile.centos.7 +++ b/utils/docker/Dockerfile.centos.7 @@ -126,7 +126,7 @@ gpgcheck=False\n" >> /etc/yum.repos.d/$repo:$branch:$build_number.repo; # force an upgrade to get any newly built RPMs ARG CACHEBUST=1 -RUN yum -y upgrade --exclude=spdk,spdk-devel,dpdk-devel,dpdk,mercury-devel,mercury,fio +RUN yum -y upgrade --exclude=spdk,spdk-devel,dpdk-devel,dpdk,mercury-devel,mercury # Switch to new user #USER $USER diff --git a/utils/docker/Dockerfile.code_scanning b/utils/docker/Dockerfile.code_scanning new file mode 100755 index 00000000000..bdd68e62072 --- /dev/null +++ b/utils/docker/Dockerfile.code_scanning @@ -0,0 +1,15 @@ +# +# Copyright 2018-2020, Intel Corporation +# +# 'recipe' for Docker for code scanning. +# + +# Pull base image +FROM fedora:latest +MAINTAINER daos-stack + +# use same UID as host and default value of 1000 if not specified +ARG UID=1000 + +# Install Python Bandit scanner. 
+RUN dnf -y install bandit diff --git a/utils/rpms/daos.spec b/utils/rpms/daos.spec index 63ba8a92a90..e2de76bf745 100644 --- a/utils/rpms/daos.spec +++ b/utils/rpms/daos.spec @@ -4,12 +4,12 @@ %global mercury_version 2.0.0a1-0.7.git.41caa14%{?dist} -# Unlimited maximum version -%global spdk_max_version 1000 +%global spdk_max_version 21 +%global spdk_min_version 19 Name: daos Version: 1.1.0 -Release: 8%{?relval}%{?dist} +Release: 9%{?relval}%{?dist} Summary: DAOS Storage Engine License: Apache @@ -34,8 +34,8 @@ BuildRequires: libabt-devel >= 1.0rc1 BuildRequires: libpmem-devel, libpmemobj-devel BuildRequires: fuse-devel >= 3.4.2 BuildRequires: protobuf-c-devel -BuildRequires: spdk-devel <= %{spdk_max_version}, spdk-tools <= %{spdk_max_version} -BuildRequires: fio < 3.4 +BuildRequires: spdk-devel > %{spdk_min_version}, spdk-devel < %{spdk_max_version} +BuildRequires: spdk-tools > %{spdk_min_version}, spdk-tools < %{spdk_max_version} %if (0%{?rhel} >= 7) BuildRequires: libisa-l-devel %else @@ -82,9 +82,6 @@ BuildRequires: libcurl4 # have choice for libpsm_infinipath.so.1()(64bit) needed by openmpi-libs: libpsm2-compat libpsm_infinipath1 BuildRequires: libpsm_infinipath1 %endif # 0%{?is_opensuse} -# have choice for libpmemblk.so.1(LIBPMEMBLK_1.0)(64bit) needed by fio: libpmemblk libpmemblk1 -# have choice for libpmemblk.so.1()(64bit) needed by fio: libpmemblk libpmemblk1 -BuildRequires: libpmemblk1 %endif # (0%{?suse_version} >= 1315) %endif # (0%{?rhel} >= 7) %if (0%{?suse_version} >= 1500) @@ -92,8 +89,8 @@ Requires: libpmem1, libpmemobj1 %endif Requires: fuse >= 3.4.2 Requires: protobuf-c -Requires: spdk <= %{spdk_max_version} Requires: fio < 3.4 +Requires: spdk > %{spdk_min_version}, spdk < %{spdk_max_version} Requires: openssl # This should only be temporary until we can get a stable upstream release # of mercury, at which time the autoprov shared library version should @@ -115,7 +112,7 @@ to optimize performance and cost. 
Summary: The DAOS server Requires: %{name} = %{version}-%{release} Requires: %{name}-client = %{version}-%{release} -Requires: spdk-tools <= %{spdk_max_version} +Requires: spdk-tools > %{spdk_min_version}, spdk-tools < %{spdk_max_version} Requires: ndctl Requires: ipmctl Requires: hwloc @@ -349,6 +346,9 @@ getent group daos_admins >/dev/null || groupadd -r daos_admins %{_libdir}/*.a %changelog +* Mon Mar 30 2020 Tom Nabarro - 1.1.0-9 +- Set version of spdk to < v21, > v19 + * Fri Mar 27 2020 David Quigley - 1.1.0-8 - add daos and dmg man pages to the daos-client files list diff --git a/utils/sl/components/__init__.py b/utils/sl/components/__init__.py index f43dbe86b61..5482b1e477f 100644 --- a/utils/sl/components/__init__.py +++ b/utils/sl/components/__init__.py @@ -293,15 +293,16 @@ def define_components(reqs): retriever = GitRepoRetriever("https://github.com/spdk/spdk.git", True) reqs.define('spdk', retriever=retriever, - commands=['./configure --prefix="$SPDK_PREFIX" --with-shared ' \ - ' --with-fio="$FIO_SRC"', - 'make $JOBS_OPT', 'make install', + commands=['./configure --prefix="$SPDK_PREFIX"' \ + ' --disable-tests --without-vhost --without-crypto' \ + ' --without-pmdk --without-vpp --without-rbd' \ + ' --with-rdma --with-shared' \ + ' --without-iscsi-initiator --without-isal' \ + ' --without-vtune', 'make $JOBS_OPT', 'make install', 'cp dpdk/build/lib/* "$SPDK_PREFIX/lib"', 'mkdir -p "$SPDK_PREFIX/share/spdk"', - 'cp -r include scripts examples/nvme/fio_plugin ' \ - '"$SPDK_PREFIX/share/spdk"'], - libs=['spdk'], - requires=['fio']) + 'cp -r include scripts "$SPDK_PREFIX/share/spdk"'], + libs=['rte_bus_pci']) url = 'https://github.com/protobuf-c/protobuf-c/releases/download/' \ 'v1.3.0/protobuf-c-1.3.0.tar.gz'